"""MVP model configuration"""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
"""
Project Euler Problem 21: Amicable Numbers.
Evaluate the sum of all the amicable numbers under 10000.
"""
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10_000) -> int:
    """Return the sum of all amicable numbers under n."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
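A quick sanity check for the solution above (hedged: (220, 284) is the classic smallest amicable pair, and 31626 is Project Euler's published answer for the default limit):

# >>> sum_of_divisors(220), sum_of_divisors(284)
# (284, 220)
# >>> solution(10000)
# 31626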
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def A ( snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = FileLock(str(tmpdir / """foo.lock""" ) )
SCREAMING_SNAKE_CASE__ = FileLock(str(tmpdir / """foo.lock""" ) )
SCREAMING_SNAKE_CASE__ = 0.01
with locka.acquire():
with pytest.raises(snake_case__ ):
SCREAMING_SNAKE_CASE__ = time.time()
locka.acquire(snake_case__ )
assert time.time() - _start > timeout
def A ( snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = """a""" * 10_00 + """.lock"""
SCREAMING_SNAKE_CASE__ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(snake_case__ )
assert len(os.path.basename(locka._lock_file ) ) <= 2_55
SCREAMING_SNAKE_CASE__ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(snake_case__ ):
locka.acquire(0 )
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCamelCase :
def __init__( self : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any]=1_3 , __UpperCAmelCase : Optional[Any]=3_0 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[int]=3 , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Optional[int]=3_2 , __UpperCAmelCase : Optional[Any]=5 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : int=3_7 , __UpperCAmelCase : str="gelu" , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : Optional[Any]=1_0 , __UpperCAmelCase : Dict=0.02 , __UpperCAmelCase : str=3 , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : str=2 , ) -> Tuple:
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = scope
SCREAMING_SNAKE_CASE__ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
SCREAMING_SNAKE_CASE__ = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ = num_patches + 2
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = DeiTModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = DeiTForMaskedImageModeling(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = DeiTForMaskedImageModeling(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE__ = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ = DeiTForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = DeiTForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase (A__ ,A__ ,unittest.TestCase ):
lowerCamelCase__ : Tuple = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Any = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : str = False
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ = DeiTModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
pass
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any=False ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__UpperCAmelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__UpperCAmelCase ),
*get_values(__UpperCAmelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
SCREAMING_SNAKE_CASE__ = problem_type["""title"""]
SCREAMING_SNAKE_CASE__ = problem_type["""num_labels"""]
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE__ = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
SCREAMING_SNAKE_CASE__ = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__UpperCAmelCase ) as warning_list:
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = DeiTModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE__ = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ = inputs.pixel_values.to(__UpperCAmelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
"""JSON/JSON Lines dataset builder for the `datasets` library."""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
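A minimal usage sketch of this builder through the public API (the file names are hypothetical; `"json"` routes `load_dataset` to the builder above):

# from datasets import load_dataset
#
# ds = load_dataset("json", data_files="data.jsonl")               # one JSON object per line
# ds = load_dataset("json", data_files="data.json", field="data")  # records nested under a "data" key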
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
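A minimal training-loop sketch using the exports above (illustrative only; `model`, `optimizer`, and `dataloader` are assumed to be defined elsewhere):

# from accelerate import Accelerator
#
# accelerator = Accelerator()
# model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
# for batch in dataloader:
#     optimizer.zero_grad()
#     loss = model(**batch).loss
#     accelerator.backward(loss)  # replaces loss.backward() so mixed precision and distributed setups work
#     optimizer.step()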
"""Tests for accelerate's kwargs handlers (GradScalerKwargs, DistributedDataParallelKwargs, ...)."""
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
"""
Fetch information about the authenticated GitHub user via the REST API.
Authentication is done with a personal access token read from the USER_TOKEN
environment variable.
"""
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch GitHub info of the authenticated user using the requests module."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
"""Convert Donut checkpoints using the original `donut-python` library. URL: https://github.com/clovaai/donut"""
import argparse

import torch
from datasets import load_dataset
from donut import DonutModel

from transformers import (
    DonutImageProcessor,
    DonutProcessor,
    DonutSwinConfig,
    DonutSwinModel,
    MBartConfig,
    MBartForCausalLM,
    VisionEncoderDecoderModel,
    XLMRobertaTokenizerFast,
)


def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
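An example invocation of the conversion script above (the script filename and output directory are illustrative, not prescribed by the source):

#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-base-finetuned-docvqa-converted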
"""Convert Jukebox checkpoints"""
import argparse
import json
import os
from pathlib import Path

import requests
import torch

from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}


def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key


def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict


@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
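An example invocation of the script above (the filename is illustrative; the values shown are the script's own defaults):

#   python convert_jukebox.py --model_name jukebox-5b-lyrics --pytorch_dump_folder_path jukebox-5b-lyrics-converted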
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class A__ :
def __init__( self :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any]=2 , SCREAMING_SNAKE_CASE :List[Any]=3_2 , SCREAMING_SNAKE_CASE :str=1_6 , SCREAMING_SNAKE_CASE :Tuple=3 , SCREAMING_SNAKE_CASE :Union[str, Any]=True , SCREAMING_SNAKE_CASE :Tuple=True , SCREAMING_SNAKE_CASE :List[Any]=3_2 , SCREAMING_SNAKE_CASE :Optional[Any]=4 , SCREAMING_SNAKE_CASE :Any=[0, 1, 2, 3] , SCREAMING_SNAKE_CASE :List[Any]=4 , SCREAMING_SNAKE_CASE :Dict=3_7 , SCREAMING_SNAKE_CASE :List[Any]="gelu" , SCREAMING_SNAKE_CASE :Tuple=0.1 , SCREAMING_SNAKE_CASE :Dict=0.1 , SCREAMING_SNAKE_CASE :List[str]=0.02 , SCREAMING_SNAKE_CASE :Union[str, Any]=3 , SCREAMING_SNAKE_CASE :Tuple=[1, 3_8_4, 2_4, 2_4] , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :List[str]=None , ) -> Optional[int]:
'''simple docstring'''
_a : Union[str, Any] =parent
_a : Tuple =batch_size
_a : Union[str, Any] =image_size
_a : Optional[int] =patch_size
_a : List[Any] =num_channels
_a : Tuple =is_training
_a : List[str] =use_labels
_a : Union[str, Any] =hidden_size
_a : Optional[int] =num_hidden_layers
_a : Tuple =backbone_out_indices
_a : Optional[Any] =num_attention_heads
_a : Optional[int] =intermediate_size
_a : Tuple =hidden_act
_a : Union[str, Any] =hidden_dropout_prob
_a : str =attention_probs_dropout_prob
_a : str =initializer_range
_a : str =num_labels
_a : Any =backbone_featmap_shape
_a : Optional[int] =scope
_a : Any =is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_a : Tuple =(image_size // patch_size) ** 2
_a : str =num_patches + 1
def __UpperCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
_a : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : str =None
if self.use_labels:
_a : Union[str, Any] =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_a : Dict =self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self :Any ) -> Union[str, Any]:
'''simple docstring'''
_a : Union[str, Any] ={
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [9_6, 1_9_2, 3_8_4, 7_6_8],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_a : str =DPTModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
_a : Union[str, Any] =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :List[Any] ) -> int:
'''simple docstring'''
_a : List[str] =self.num_labels
_a : str =DPTForDepthEstimation(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
_a : Union[str, Any] =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Any:
'''simple docstring'''
_a : Optional[Any] =self.num_labels
_a : str =DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
_a : Tuple =model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
_a : List[str] =self.prepare_config_and_inputs()
_a , _a , _a : Union[str, Any] =config_and_inputs
_a : int ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
__UpperCamelCase : Union[str, Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__UpperCamelCase : List[str] = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : List[str] = False
__UpperCamelCase : Optional[Any] = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
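# Usage sketch (illustrative, not part of the test suite): the raw (1, 384, 384) depth map is
# usually interpolated back to the original image resolution before visualization. `outputs`
# and `image` are assumed to be produced exactly as in the test above.
def postprocess_depth(outputs, image):
    """Resize the predicted depth to the input PIL image size and normalize to [0, 1]."""
    prediction = torch.nn.functional.interpolate(
        outputs.predicted_depth.unsqueeze(1),  # add a channel dim: (1, 1, H, W)
        size=image.size[::-1],  # PIL size is (width, height)
        mode="bicubic",
        align_corners=False,
    ).squeeze()
    return (prediction - prediction.min()) / (prediction.max() - prediction.min())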
| 506
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 506
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'''{prefix}cls_token''', '''beit.embeddings.cls_token'''),
(F'''{prefix}patch_embed.proj.weight''', '''beit.embeddings.patch_embeddings.projection.weight'''),
(F'''{prefix}patch_embed.proj.bias''', '''beit.embeddings.patch_embeddings.projection.bias'''),
(F'''{prefix}pos_embed''', '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original DiT checkpoint weights into our BEiT structure."""
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
snake_case_ = 1024
snake_case_ = 4096
snake_case_ = 24
snake_case_ = 16
# labels
if "rvlcdip" in checkpoint_url:
snake_case_ = 16
snake_case_ = '''huggingface/label-files'''
snake_case_ = '''rvlcdip-id2label.json'''
snake_case_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) )
snake_case_ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
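# Example invocation (a sketch; the script filename is an assumption, while the checkpoint
# URL is the real default declared above):
#
#   python convert_dit_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base \
#       --push_to_hub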
| 39
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
| 3
| 0
|
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['''small''', '''medium''', '''large''']
OLD_KEY = '''lm_head.decoder.weight'''
NEW_KEY = '''lm_head.weight'''


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = F"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 700
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is kept out of ClassVar so it shows up in `asdict` output
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
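# Usage sketch (illustrative): the template maps a dataset's own column names onto the
# canonical "question"/"context"/"answers" schema. The column names below are hypothetical.
#
#   task = QuestionAnsweringExtractive(question_column="query", context_column="passage")
#   task.column_mapping  # {"query": "question", "passage": "context", "answers": "answers"}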
| 472
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
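# Usage sketch (illustrative): with the defaults above, the derived channel dimension is
# embed_dim * 2 ** (len(depths) - 1) = 64 * 2 ** 3 = 512.
#
#   config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5])
#   assert config.hidden_size == 512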
| 24
|
"""simple docstring"""
import socket
def main():
    """Connect to a local file server on port 12312 and save the received bytes to disk."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b'''Hello server!''')

    with open('''Received_file''', '''wb''') as out_file:
        print('''File opened''')
        print('''Receiving data...''')
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print('''Successfully received the file''')
    sock.close()
    print('''Connection closed''')
if __name__ == "__main__":
main()
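# A minimal matching server sketch (an assumption -- the counterpart script is not shown here).
# It listens on the same port and streams a file to the first client that connects:
#
#   import socket
#
#   def serve(filename="File_to_send", port=12312):
#       server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#       server.bind((socket.gethostname(), port))
#       server.listen(1)
#       conn, _ = server.accept()
#       print(conn.recv(1024))  # the client's greeting
#       with open(filename, "rb") as f:
#           while chunk := f.read(1024):
#               conn.send(chunk)
#       conn.close()
#       server.close()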
| 608
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state, num_inference_steps, shape=()) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
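# Usage sketch (illustrative, assuming a `model` callable that predicts the noise residual;
# sigma_prev bookkeeping and the correction step are elided for brevity):
#
#   scheduler = FlaxKarrasVeScheduler()
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   for t in state.timesteps:
#       sigma = state.schedule[t]
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
#       model_output = model(sample_hat, sigma_hat)
#       sample, derivative, state = scheduler.step(
#           state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=False)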
| 712
|
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
__snake_case = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
__snake_case = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
__snake_case = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 128
| 0
|
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = 'facebook'
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = 'allenai'
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
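# Worked example: {"le@@": 5, "tt@@": 6, "er": 7, "<unk>": 3}
#   -> {"le": 5, "tt": 6, "er</w>": 7, "<unk>": 3}
# BPE-continuation markers ("@@") are dropped, word-final tokens get "</w>",
# and the special tokens are restored without the "</w>" suffix.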
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"using checkpoint {checkpoint_file}")
SCREAMING_SNAKE_CASE : Dict = hub_utils.from_pretrained(
_a , _a , _a , archive_map=_a , **_a)
SCREAMING_SNAKE_CASE : int = vars(chkpt["args"]["model"])
SCREAMING_SNAKE_CASE : Union[str, Any] = args["source_lang"]
SCREAMING_SNAKE_CASE : List[Any] = args["target_lang"]
SCREAMING_SNAKE_CASE : Any = dirname(_a)
SCREAMING_SNAKE_CASE : Optional[Any] = basename(_a)
# dicts
SCREAMING_SNAKE_CASE : Tuple = os.path.join(_a , f"dict.{src_lang}.txt")
SCREAMING_SNAKE_CASE : Dict = os.path.join(_a , f"dict.{tgt_lang}.txt")
SCREAMING_SNAKE_CASE : Dict = Dictionary.load(_a)
SCREAMING_SNAKE_CASE : str = rewrite_dict_keys(src_dict.indices)
SCREAMING_SNAKE_CASE : int = len(_a)
SCREAMING_SNAKE_CASE : Any = os.path.join(_a , "vocab-src.json")
print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
with open(_a , "w" , encoding="utf-8") as f:
f.write(json.dumps(_a , ensure_ascii=_a , indent=_a))
    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)
    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"
    model_conf = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.02,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0
    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }
    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)
print("Conversion is done!")
print("\nLast step is to upload the files to s3")
print(f"cd {data_root}")
print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
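# Example invocation (a sketch; the script filename is an assumption, and the checkpoint
# layout follows the comment block at the top of this file -- the .pt file must sit next
# to its dict.*.txt and bpecodes files):
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path ./wmt19.ru-en.ensemble/model4.pt \
#       --pytorch_dump_folder_path ./wmt19-ru-en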
| 25
|
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 25
| 1
|
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation
# print table header
print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " )
print("-" * (30 + len(__lowerCAmelCase )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(__lowerCAmelCase ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(__lowerCAmelCase ) , sep=" | " )
else:
snake_case__ = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(__lowerCAmelCase ) , sep=" | " )
snake_case__ = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(__lowerCAmelCase ) , sep=" | " )
stack.append(
str(opr[x](int(__lowerCAmelCase ) , int(__lowerCAmelCase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(__lowerCAmelCase ) , sep=" | " , )
return int(stack[0] )
if __name__ == "__main__":
    Postfix = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
    print('''\n\tResult = ''', solve(Postfix))
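# Worked example: the postfix expression "5 6 9 * +" evaluates as
#   push(5) -> push(6) -> push(9) -> pop 9 and 6, push 6*9=54 -> pop 54 and 5, push 5+54=59
# so solve("5 6 9 * +".split(" ")) returns 59, the value of the infix expression 5 + 6 * 9.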
| 530
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
                SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
                SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f", "al", "s", "é", ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
                SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",",
                SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268,
            21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319,
            258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259,
            241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26,
            0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265,
        ]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch
        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
| 530
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 620
|
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class StableDiffusionOnnxInpaintLegacyPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 620
| 1
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__(self : List[str] , a__ : Dict[str, int] , a__ : List[str] , a__ : int = None , a__ : int = None ):
"""simple docstring"""
super().__init__()
__snake_case = pad_token_id
__snake_case = max_length
__snake_case = vocab
__snake_case = merges
__snake_case = BytePairTokenizer(a__ , a__ , sequence_length=a__ )
@classmethod
def a (cls : str , a__ : GPTaTokenizer , *a__ : List[str] , **a__ : Union[str, Any] ):
"""simple docstring"""
        __snake_case = [''' '''.join(m ) for m in tokenizer.bpe_ranks.keys()]
__snake_case = tokenizer.get_vocab()
return cls(a__ , a__ , *a__ , **a__ )
@classmethod
def a (cls : Union[str, Any] , a__ : Union[str, os.PathLike] , *a__ : List[Any] , **a__ : Tuple ):
"""simple docstring"""
__snake_case = GPTaTokenizer.from_pretrained(a__ , *a__ , **a__ )
return cls.from_tokenizer(a__ , *a__ , **a__ )
@classmethod
def a (cls : Dict , a__ : Dict ):
"""simple docstring"""
return cls(**a__ )
def a (self : str ):
"""simple docstring"""
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def a (self : Optional[Any] , a__ : int , a__ : int = None ):
"""simple docstring"""
__snake_case = self.tf_tokenizer(a__ )
__snake_case = tf.ones_like(a__ )
if self.pad_token_id is not None:
# pad the tokens up to max length
__snake_case = max_length if max_length is not None else self.max_length
if max_length is not None:
__snake_case , __snake_case = pad_model_inputs(
a__ , max_seq_length=a__ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 388
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[str] = 'new-model'
if is_tf_available():
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : Optional[Any] = NewModelConfig
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = '''bert-base-cased'''
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = '''bert-base-cased'''
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForPreTraining.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : int ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForCausalLM.from_pretrained(a__ )
__snake_case , __snake_case = TFAutoModelForCausalLM.from_pretrained(a__ , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : int ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelWithLMHead.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : str ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForMaskedLM.from_pretrained(a__ )
__snake_case , __snake_case = TFAutoModelForMaskedLM.from_pretrained(a__ , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : Any ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForSeqaSeqLM.from_pretrained(a__ )
__snake_case , __snake_case = TFAutoModelForSeqaSeqLM.from_pretrained(a__ , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : Union[str, Any] ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForSequenceClassification.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : str ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForQuestionAnswering.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
@require_tensorflow_probability
def a (self : List[str] ):
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForTableQuestionAnswering.from_pretrained(a__ )
__snake_case , __snake_case = TFAutoModelForTableQuestionAnswering.from_pretrained(
a__ , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = TFAutoModelWithLMHead.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=a__ ) , 1_4410 )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = TFAutoModelWithLMHead.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=a__ ) , 1_4410 )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
self.assertIsInstance(a__ , a__ )
__snake_case = copy.deepcopy(model.config )
__snake_case = ['''FunnelBaseModel''']
__snake_case = TFAutoModel.from_config(a__ )
self.assertIsInstance(a__ , a__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(a__ )
__snake_case = TFAutoModel.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
def a (self : str ):
"""simple docstring"""
try:
AutoConfig.register('''new-model''' , a__ )
__snake_case = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(a__ ):
auto_class.register(a__ , a__ )
auto_class.register(a__ , a__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a__ ):
auto_class.register(a__ , a__ )
# Now that the config is registered, it can be used as any other config with the auto-API
__snake_case = BertModelTester(self ).get_config()
__snake_case = NewModelConfig(**tiny_config.to_dict() )
__snake_case = auto_class.from_config(a__ )
self.assertIsInstance(a__ , a__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(a__ )
__snake_case = auto_class.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def a (self : Tuple ):
"""simple docstring"""
with self.assertRaisesRegex(
a__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
__snake_case = TFAutoModel.from_pretrained('''bert-base''' )
def a (self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
a__ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__snake_case = TFAutoModel.from_pretrained(a__ , revision='''aaaaaa''' )
def a (self : Tuple ):
"""simple docstring"""
with self.assertRaisesRegex(
a__ , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
__snake_case = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def a (self : int ):
"""simple docstring"""
with self.assertRaisesRegex(a__ , '''Use `from_pt=True` to load this model''' ):
__snake_case = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
__snake_case = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
__snake_case = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
with RequestCounter() as counter:
__snake_case = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 388
| 1
|
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCamelCase( __snake_case ):
'''simple docstring'''
__magic_name__ = ['image_processor', 'tokenizer']
__magic_name__ = 'BlipImageProcessor'
__magic_name__ = 'AutoTokenizer'
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
super().__init__(snake_case_ , snake_case_ )
# add QFormer tokenizer
_A = qformer_tokenizer
def __call__( self , snake_case_ = None , snake_case_ = None , snake_case_ = True , snake_case_ = False , snake_case_ = None , snake_case_ = None , snake_case_ = 0 , snake_case_ = None , snake_case_ = None , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = True , snake_case_ = None , **snake_case_ , ):
if images is None and text is None:
raise ValueError('You have to specify at least images or text.' )
_A = BatchFeature()
if text is not None:
_A = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
encoding.update(snake_case_ )
_A = self.qformer_tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
_A = qformer_text_encoding.pop('input_ids' )
_A = qformer_text_encoding.pop('attention_mask' )
if images is not None:
_A = self.image_processor(snake_case_ , return_tensors=snake_case_ )
encoding.update(snake_case_ )
return encoding
def lowerCAmelCase__ ( self , *snake_case_ , **snake_case_ ):
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self , *snake_case_ , **snake_case_ ):
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self ):
_A = self.tokenizer.model_input_names
_A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCAmelCase__ ( self , snake_case_ , **snake_case_ ):
if os.path.isfile(snake_case_ ):
raise ValueError(F"Provided path ({save_directory}) should be a directory, not a file" )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
_A = os.path.join(snake_case_ , 'qformer_tokenizer' )
self.qformer_tokenizer.save_pretrained(snake_case_ )
return super().save_pretrained(snake_case_ , **snake_case_ )
@classmethod
def lowerCAmelCase__ ( cls , snake_case_ , **snake_case_ ):
_A = AutoTokenizer.from_pretrained(snake_case_ , subfolder='qformer_tokenizer' )
_A = cls._get_arguments_from_pretrained(snake_case_ , **snake_case_ )
args.append(snake_case_ )
return cls(*snake_case_ )
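# Hedged usage sketch for the processor above (upstream: InstructBlipProcessor); a
# single call produces both the language-model and the Q-Former token streams:
# inputs = processor(images=image, text="Describe the image.", return_tensors="pt")
# sorted(inputs.keys())  # attention_mask, input_ids, pixel_values,
#                        # qformer_attention_mask, qformer_input_ids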
| 27
|
from math import sqrt
def _A ( SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
a__ : Optional[Any] =0
for i in range(1 , int(sqrt(SCREAMING_SNAKE_CASE ) + 1 ) ):
if n % i == 0 and i != sqrt(SCREAMING_SNAKE_CASE ):
total += i + n // i
elif i == sqrt(SCREAMING_SNAKE_CASE ):
total += i
return total - n
def _A ( SCREAMING_SNAKE_CASE : int = 10_000 ):
"""simple docstring"""
a__ : List[Any] =sum(
i
for i in range(1 , SCREAMING_SNAKE_CASE )
if sum_of_divisors(sum_of_divisors(SCREAMING_SNAKE_CASE ) ) == i and sum_of_divisors(SCREAMING_SNAKE_CASE ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
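# Worked example (Project Euler 21): the classic amicable pair is 220/284. The
# proper-divisor helper above (mangled name; upstream `sum_of_divisors`) gives
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, so both numbers are
# counted, and solution(10_000) evaluates to 31626.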
| 563
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :Optional[int] = """trocr"""
__magic_name__ :Any = ["""past_key_values"""]
__magic_name__ :Tuple = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__( self , __UpperCAmelCase=5_0_2_6_5 , __UpperCAmelCase=1_0_2_4 , __UpperCAmelCase=1_2 , __UpperCAmelCase=1_6 , __UpperCAmelCase=4_0_9_6 , __UpperCAmelCase="gelu" , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = vocab_size
lowerCAmelCase__ :Optional[Any] = d_model
lowerCAmelCase__ :Optional[Any] = decoder_layers
lowerCAmelCase__ :Tuple = decoder_attention_heads
lowerCAmelCase__ :Union[str, Any] = decoder_ffn_dim
lowerCAmelCase__ :Tuple = activation_function
lowerCAmelCase__ :str = max_position_embeddings
lowerCAmelCase__ :Optional[Any] = dropout
lowerCAmelCase__ :Optional[int] = attention_dropout
lowerCAmelCase__ :List[str] = activation_dropout
lowerCAmelCase__ :Tuple = init_std
lowerCAmelCase__ :Optional[int] = decoder_layerdrop
lowerCAmelCase__ :Tuple = use_cache
lowerCAmelCase__ :List[Any] = scale_embedding
lowerCAmelCase__ :Optional[Any] = use_learned_position_embeddings
lowerCAmelCase__ :Optional[Any] = layernorm_embedding
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
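# Hedged usage note: TrOCR configs describe a decoder only, so they are typically
# paired with a vision encoder (e.g. via VisionEncoderDecoderModel). The attribute
# map above also means `config.hidden_size` transparently reads `config.d_model`.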
| 560
|
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__A = 3
def __A (_SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
print('Generating primitive root of p' )
while True:
lowerCAmelCase__ :Dict = random.randrange(3 , _SCREAMING_SNAKE_CASE )
if pow(_SCREAMING_SNAKE_CASE , 2 , _SCREAMING_SNAKE_CASE ) == 1:
continue
if pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) == 1:
continue
return g
def __A (_SCREAMING_SNAKE_CASE ) ->tuple[tuple[int, int, int, int], tuple[int, int]]:
"""simple docstring"""
print('Generating prime p...' )
lowerCAmelCase__ :Dict = rabin_miller.generate_large_prime(_SCREAMING_SNAKE_CASE ) # select large prime number.
lowerCAmelCase__ :Tuple = primitive_root(_SCREAMING_SNAKE_CASE ) # one primitive root on modulo p.
lowerCAmelCase__ :List[Any] = random.randrange(3 , _SCREAMING_SNAKE_CASE ) # private_key -> have to be greater than 2 for safety.
lowerCAmelCase__ :int = cryptomath.find_mod_inverse(pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :str = (key_size, e_a, e_a, p)
lowerCAmelCase__ :List[str] = (key_size, d)
return public_key, private_key
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->None:
"""simple docstring"""
if os.path.exists(F"{name}_pubkey.txt" ) or os.path.exists(F"{name}_privkey.txt" ):
print('\nWARNING:' )
print(
F"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
'Use a different name or delete these files and re-run this program.' )
sys.exit()
lowerCAmelCase__ , lowerCAmelCase__ :Dict = generate_key(_SCREAMING_SNAKE_CASE )
print(F"\nWriting public key to file {name}_pubkey.txt..." )
with open(F"{name}_pubkey.txt" , 'w' ) as fo:
fo.write(F"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}" )
print(F"Writing private key to file {name}_privkey.txt..." )
with open(F"{name}_privkey.txt" , 'w' ) as fo:
fo.write(F"{private_key[0]},{private_key[1]}" )
def __A () ->None:
"""simple docstring"""
print('Making key files...' )
make_key_files('elgamal' , 2048 )
print('Key files generation successful' )
if __name__ == "__main__":
main()
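# A hedged sketch (not in the original module) of how the generated keys would be
# used for textbook ElGamal over the integers; `g`, `h`, `p`, `d` follow the key
# layout above, with h = (g**d)^(-1) mod p, and the function names are illustrative.
def elgamal_encrypt(message: int, public_key: tuple) -> tuple:
    _key_size, g, h, p = public_key
    r = random.randrange(3, p)  # fresh ephemeral secret per message
    return pow(g, r, p), (message * pow(h, r, p)) % p  # (c1, c2)
def elgamal_decrypt(c1: int, c2: int, private_key: tuple, p: int) -> int:
    _key_size, d = private_key
    return (c2 * pow(c1, d, p)) % p  # c2 * c1**d == m * g**(-d*r) * g**(r*d) == m (mod p)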
| 560
| 1
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["image_processor", "tokenizer"]
_lowerCamelCase ="CLIPImageProcessor"
_lowerCamelCase =("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Tuple , a__ : List[Any]=None , a__ : str=None , **a__ : Tuple ):
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a__ , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a__ , a__ )
def __call__( self : Optional[Any] , a__ : Optional[int]=None , a__ : List[str]=None , a__ : int=None , **a__ : Tuple ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(a__ , return_tensors=a__ , **a__ )
if images is not None:
UpperCAmelCase = self.image_processor(a__ , return_tensors=a__ , **a__ )
if text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ )
def __snake_case ( self : List[str] , *a__ : Union[str, Any] , **a__ : Optional[int] ):
return self.tokenizer.batch_decode(*a__ , **a__ )
def __snake_case ( self : int , *a__ : Optional[int] , **a__ : int ):
return self.tokenizer.decode(*a__ , **a__ )
@property
def __snake_case ( self : str ):
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __snake_case ( self : Optional[int] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , a__ , )
return self.image_processor_class
@property
def __snake_case ( self : List[Any] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , a__ , )
return self.image_processor
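# Hedged usage sketch for the processor above (upstream: CLIPProcessor); the
# checkpoint name is an assumption for illustration:
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# batch.keys()  # input_ids, attention_mask, pixel_values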
| 51
|
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =StableUnCLIPPipeline
_lowerCamelCase =TEXT_TO_IMAGE_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_lowerCamelCase =False
def __snake_case ( self : str ):
UpperCAmelCase = 32
UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=a__ , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=a__ , num_layers=1 , )
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=a__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=a__ )
UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=a__ , layers_per_block=1 , upcast_attention=a__ , use_linear_projection=a__ , )
torch.manual_seed(0 )
UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=a__ , steps_offset=1 , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL()
UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def __snake_case ( self : str , a__ : Dict , a__ : List[str]=0 ):
if str(a__ ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(a__ )
else:
UpperCAmelCase = torch.Generator(device=a__ ).manual_seed(a__ )
UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __snake_case ( self : List[Any] ):
UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=a__ )
def __snake_case ( self : Tuple ):
UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=a__ )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
        UpperCAmelCase = pipe('''anime turtle''' , generator=a__ , output_type='''np''' )
UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
def __snake_case ( self : str ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
UpperCAmelCase = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 51
| 1
|
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
__UpperCAmelCase = {'''UserAgent''': UserAgent().random}
def lowercase__ ( lowerCAmelCase__ : int ) -> dict:
'''simple docstring'''
a__ : Optional[Any] = script.contents[0]
a__ : Tuple = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __UpperCAmelCase :
def __init__( self : Optional[Any] , a_ : Tuple ) -> Tuple:
'''simple docstring'''
a__ : Tuple = F"https://www.instagram.com/{username}/"
a__ : List[str] = self.get_json()
def UpperCAmelCase ( self : Optional[int] ) -> dict:
'''simple docstring'''
a__ : Optional[Any] = requests.get(self.url , headers=a_ ).text
a__ : int = BeautifulSoup(a_ , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Union[str, Any] ) -> str:
'''simple docstring'''
return F"{self.__class__.__name__}('{self.username}')"
def __str__( self : Optional[Any] ) -> str:
'''simple docstring'''
return F"{self.fullname} ({self.username}) is {self.biography}"
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
'''simple docstring'''
return self.user_data["username"]
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
'''simple docstring'''
return self.user_data["full_name"]
@property
def UpperCAmelCase ( self : Optional[Any] ) -> str:
'''simple docstring'''
return self.user_data["biography"]
@property
def UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
return self.user_data["business_email"]
@property
def UpperCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
return self.user_data["external_url"]
@property
def UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def UpperCAmelCase ( self : Tuple ) -> int:
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def UpperCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def UpperCAmelCase ( self : Tuple ) -> bool:
'''simple docstring'''
return self.user_data["is_verified"]
@property
def UpperCAmelCase ( self : str ) -> bool:
'''simple docstring'''
return self.user_data["is_private"]
def lowercase__ ( lowerCAmelCase__ : str = "github" ) -> None:
'''simple docstring'''
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
a__ : Tuple = InstagramUser(lowerCAmelCase__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , lowerCAmelCase__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = InstagramUser('''github''')
print(instagram_user)
print(f"{instagram_user.number_of_posts = }")
print(f"{instagram_user.number_of_followers = }")
print(f"{instagram_user.number_of_followings = }")
print(f"{instagram_user.email = }")
print(f"{instagram_user.website = }")
print(f"{instagram_user.profile_picture_url = }")
print(f"{instagram_user.is_verified = }")
print(f"{instagram_user.is_private = }")
| 251
|
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
def lowercase__ ( lowerCAmelCase__ : nn.ModuleList , lowerCAmelCase__ : nn.ModuleList , lowerCAmelCase__ : List[int] ) -> None:
'''simple docstring'''
a__ : Optional[int] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ), F"{len(lowerCAmelCase__ )} != {len(lowerCAmelCase__ )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
__UpperCAmelCase = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
__UpperCAmelCase = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
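# Worked example of the maps above: distilling a 12-layer teacher into a 3-layer
# student copies teacher layers [0, 6, 11] (first, middle, last), and the matching
# supervision map pairs the student's layers with teacher layers [3, 7, 11].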
def lowercase__ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : int ) -> int:
'''simple docstring'''
try:
a__ : List[Any] = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
F" {n_student}" )
return list(range(lowerCAmelCase__ ) )
def lowercase__ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] ) -> List[int]:
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(F"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(lowerCAmelCase__ ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def lowercase__ ( lowerCAmelCase__ : Union[str, PreTrainedModel] , lowerCAmelCase__ : Union[str, Path] = "student" , lowerCAmelCase__ : Union[int, None] = None , lowerCAmelCase__ : Union[int, None] = None , lowerCAmelCase__ : str=False , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : List[str]=None , **lowerCAmelCase__ : Dict , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
'''simple docstring'''
a__ : int = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
AutoTokenizer.from_pretrained(lowerCAmelCase__ ).save_pretrained(lowerCAmelCase__ ) # purely for convenience
a__ : int = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ ).eval()
else:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), F"teacher must be a model or string got type {type(lowerCAmelCase__ )}"
a__ : Any = teacher.config.to_diff_dict()
try:
a__ , a__ : List[str] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
a__ : Union[str, Any] = teacher_e
if d is None:
a__ : Optional[int] = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
a__ , a__ : Optional[int] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
a__ , a__ : Dict = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
a__ : int = teacher_e
if d is None:
a__ : Tuple = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase__ )
# Copy weights
a__ : Optional[int] = teacher.config_class(**lowerCAmelCase__ )
a__ : Optional[Any] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase__ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
a__ : Tuple = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase__ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
a__ , a__ : int = list(range(lowerCAmelCase__ ) ), list(range(lowerCAmelCase__ ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCAmelCase__ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
a__ : List[int] = pick_layers_to_copy(lowerCAmelCase__ , lowerCAmelCase__ )
if d_layers_to_copy is None:
a__ : List[int] = pick_layers_to_copy(lowerCAmelCase__ , lowerCAmelCase__ )
try:
if hasattr(
lowerCAmelCase__ , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase__ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase__ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase__ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase__ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase__ )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase__ )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
a__ : Optional[Any] = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase__ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 251
| 1
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class A_ ( unittest.TestCase ):
def __init__( self : Dict , snake_case_ : List[str] , snake_case_ : List[str]=1_3 , snake_case_ : Optional[int]=3_0 , snake_case_ : str=2 , snake_case_ : Optional[Any]=3 , snake_case_ : Optional[Any]=True , snake_case_ : Optional[Any]=True , snake_case_ : List[Any]=3_2 , snake_case_ : Any=5 , snake_case_ : List[Any]=4 , snake_case_ : Optional[Any]=3_7 , snake_case_ : int="gelu" , snake_case_ : Optional[Any]=0.1 , snake_case_ : Optional[Any]=0.1 , snake_case_ : Optional[int]=1_0 , snake_case_ : Any=0.0_2 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = num_patches + 1
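        # e.g. with the defaults above (image_size=30, patch_size=2):
        # num_patches = (30 // 2) ** 2 = 225, so the sequence length is 226 (patches + [CLS])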
def lowercase ( self : Any ):
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, pixel_values
def lowercase ( self : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[int] ):
_UpperCAmelCase = FlaxViTModel(config=snake_case_ )
_UpperCAmelCase = model(snake_case_ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = (self.image_size, self.image_size)
_UpperCAmelCase = (self.patch_size, self.patch_size)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def lowercase ( self : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : str ):
_UpperCAmelCase = self.type_sequence_label_size
_UpperCAmelCase = FlaxViTForImageClassification(config=snake_case_ )
_UpperCAmelCase = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = FlaxViTForImageClassification(snake_case_ )
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase = model(snake_case_ )
def lowercase ( self : Dict ):
_UpperCAmelCase = self.prepare_config_and_inputs()
        _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class A_ ( lowerCAmelCase_ , unittest.TestCase ):
_lowerCamelCase : Tuple = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def lowercase ( self : Any ):
_UpperCAmelCase = FlaxViTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 )
def lowercase ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowercase ( self : List[str] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
def lowercase ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
_UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowercase ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = self._prepare_for_class(snake_case_ , snake_case_ )
_UpperCAmelCase = model_class(snake_case_ )
@jax.jit
def model_jitted(snake_case_ : List[Any] , **snake_case_ : Dict ):
return model(pixel_values=snake_case_ , **snake_case_ )
with self.subTest("JIT Enabled" ):
_UpperCAmelCase = model_jitted(**snake_case_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_UpperCAmelCase = model_jitted(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for jitted_output, output in zip(snake_case_ , snake_case_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase ( self : str ):
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained("google/vit-base-patch16-224" )
_UpperCAmelCase = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(snake_case_ )
| 236
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
__SCREAMING_SNAKE_CASE :int = logging.getLogger(__name__)
torch.set_grad_enabled(False)
__SCREAMING_SNAKE_CASE :Optional[int] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def UpperCAmelCase_ ( __lowercase : str , __lowercase : Union[str, Any]=100 , __lowercase : Dict=" " ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = text.split(__lowercase )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__lowercase ) , __lowercase )]
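# e.g. split_text("a b c d e", n=2) gives ["a b", "c d", "e"]; the default n=100 yields 100-word passages.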
def UpperCAmelCase_ ( __lowercase : dict ) -> dict:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = [], []
for title, text in zip(documents["title"] , documents["text"] ):
if text is not None:
for passage in split_text(__lowercase ):
titles.append(title if title is not None else "" )
texts.append(__lowercase )
return {"title": titles, "text": texts}
def UpperCAmelCase_ ( __lowercase : dict , __lowercase : DPRContextEncoder , __lowercase : DPRContextEncoderTokenizerFast ) -> dict:
'''simple docstring'''
_UpperCAmelCase = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=__lowercase , padding="longest" , return_tensors="pt" )["input_ids"]
_UpperCAmelCase = ctx_encoder(input_ids.to(device=__lowercase ) , return_dict=__lowercase ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def UpperCAmelCase_ ( __lowercase : "RagExampleArguments" , __lowercase : "ProcessingArguments" , __lowercase : "IndexHnswArguments" , ) -> Any:
'''simple docstring'''
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
_UpperCAmelCase = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
_UpperCAmelCase = dataset.map(__lowercase , batched=__lowercase , num_proc=processing_args.num_proc )
# And compute the embeddings
_UpperCAmelCase = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__lowercase )
_UpperCAmelCase = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
_UpperCAmelCase = Features(
{"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space
_UpperCAmelCase = dataset.map(
partial(__lowercase , ctx_encoder=__lowercase , ctx_tokenizer=__lowercase ) , batched=__lowercase , batch_size=processing_args.batch_size , features=__lowercase , )
# And finally save your dataset
_UpperCAmelCase = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" )
dataset.save_to_disk(__lowercase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
_UpperCAmelCase = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("embeddings" , custom_index=__lowercase )
# And save the index
_UpperCAmelCase = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" )
dataset.get_index("embeddings" ).save(__lowercase )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class A_ :
_lowerCamelCase : str = field(
default=str(Path(lowerCAmelCase_ ).parent / """test_run""" / """dummy-kb""" / """my_knowledge_dataset.csv""" ) , metadata={"""help""": """Path to a tab-separated csv file with columns 'title' and 'text'"""} , )
_lowerCamelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={"""help""": """Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."""} , )
_lowerCamelCase : str = field(
default="""facebook/rag-sequence-nq""" , metadata={"""help""": """The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"""} , )
_lowerCamelCase : str = field(
default="""facebook/dpr-ctx_encoder-multiset-base""" , metadata={
"""help""": (
"""The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"""
""" 'facebook/dpr-ctx_encoder-multiset-base'"""
)
} , )
_lowerCamelCase : Optional[str] = field(
default=str(Path(lowerCAmelCase_ ).parent / """test_run""" / """dummy-kb""" ) , metadata={"""help""": """Path to a directory where the dataset passages and the index will be saved"""} , )
@dataclass
class A_ :
_lowerCamelCase : Optional[int] = field(
default=lowerCAmelCase_ , metadata={
"""help""": """The number of processes to use to split the documents into passages. Default is single process."""
} , )
_lowerCamelCase : int = field(
default=16 , metadata={
"""help""": """The batch size to use when computing the passages embeddings using the DPR context encoder."""
} , )
@dataclass
class A_ :
_lowerCamelCase : int = field(
default=7_68 , metadata={"""help""": """The dimension of the embeddings to pass to the HNSW Faiss index."""} , )
_lowerCamelCase : int = field(
default=1_28 , metadata={
"""help""": (
"""The number of bi-directional links created for every new element during the HNSW index construction."""
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
__SCREAMING_SNAKE_CASE :Tuple = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Tuple = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE :Optional[int] = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 236
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
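# Hypothetical minimal sketch of the lazy-import pattern above (simplified, not the
# real transformers _LazyModule): attribute access triggers the submodule import.
#
#   import importlib, types
#
#   class _TinyLazyModule(types.ModuleType):
#       def __init__(self, name, structure):
#           super().__init__(name)
#           # invert {submodule: [names]} into {name: submodule}
#           self._origin = {n: mod for mod, names in structure.items() for n in names}
#       def __getattr__(self, attr):
#           module = importlib.import_module("." + self._origin[attr], self.__name__)
#           return getattr(module, attr)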
| 490
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_00_04
RO_CODE = 25_00_20
@require_sentencepiece
@require_tokenizers
class __lowercase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def UpperCamelCase__ ( self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ) -> Dict:
        token = '<s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def UpperCamelCase__ ( self ) -> List[str]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(vocab_keys ) , 1054 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def UpperCamelCase__ ( self ) -> Dict:
# fmt: off
__a = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=__a , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase ( unittest.TestCase ):
_a = """facebook/mbart-large-50-one-to-many-mmt"""
_a = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
_a = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
_a = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
@classmethod
def UpperCamelCase__ ( cls ) -> Optional[int]:
        cls.tokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
        cls.pad_token_id = 1
return cls
def UpperCamelCase__ ( self ) -> int:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 )
def UpperCamelCase__ ( self ) -> int:
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
def UpperCamelCase__ ( self ) -> int:
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )
def UpperCamelCase__ ( self ) -> int:
        src_text = ['this is gunna be a long sentence ' * 20]
        assert isinstance(src_text[0] , str )
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[0] , EN_CODE )
        self.assertEqual(ids[-1] , 2 )
        self.assertEqual(len(ids ) , desired_max_length )
def UpperCamelCase__ ( self ) -> str:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] )
def UpperCamelCase__ ( self ) -> Optional[int]:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def UpperCamelCase__ ( self ) -> Optional[Any]:
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='pt' )
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase__ ( self ) -> int:
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCamelCase__ ( self ) -> Optional[int]:
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors='pt' )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors='pt' )
        labels = targets['input_ids']
        batch['decoder_input_ids'] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase__ ( self ) -> Dict:
        inputs = self.tokenizer._build_translation_inputs(
            'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
        self.assertEqual(
            nested_simplify(inputs ) , {
# en_XX, A, test, EOS
'input_ids': [[25_0004, 62, 3034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_0001,
} , )
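# Summary of the layout exercised above (paraphrase, not from the original file):
# MBart-50 encodes source text as [src_lang_code] + tokens + [eos], and
# shift_tokens_right rotates the final eos to the front of the decoder input,
# which is why decoder_input_ids start with [2, RO_CODE] in the parity test.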
| 490
| 1
|
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCAmelCase__ ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'} ) - {'image', 'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'} ) - {'image'}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    test_attention_slicing = False
    # No `output_type`.
    required_optional_params = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
    def __UpperCamelCase ( self : Optional[Any] , device , seed=0 ) -> Optional[int]:
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def __UpperCamelCase ( self : Optional[int] ) -> str:
        device = 'cpu' # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCamelCase ( self : int ) -> Tuple:
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False , expected_max_diff=5e-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __UpperCamelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __UpperCamelCase ( self : str ) -> Any:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
pass
def __UpperCamelCase ( self : Any ) -> Any:
return super().test_progress_bar()
@slow
@skip_mps
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
        pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.float16 )
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        video = torch.randn((1, 10, 3, 1_024, 576) , generator=generator )
        video = video.to('cuda' )
        prompt = 'Spiderman is surfing'
        video_frames = pipe(prompt , video=video , generator=generator , num_inference_steps=3 , output_type='pt' ).frames
        expected_array = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 106
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def a__ ( A__, A__, A__, A__, A__=True, A__="pt" ):
SCREAMING_SNAKE_CASE_ : Tuple = {'add_prefix_space': True} if isinstance(A__, A__ ) and not line.startswith(' ' ) else {}
SCREAMING_SNAKE_CASE_ : Dict = padding_side
return tokenizer(
[line], max_length=A__, padding='max_length' if pad_to_max_length else None, truncation=A__, return_tensors=A__, add_special_tokens=A__, **A__, )
def trim_batch( input_ids, pad_token_id, attention_mask=None, ):
    # Remove columns that are populated exclusively by pad_token_id
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
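# Hypothetical example (illustrative only, not from the original file): with pad_token_id = 0,
#   trim_batch(torch.tensor([[5, 6, 0], [7, 0, 0]]), 0)
# drops the all-pad third column and returns [[5, 6], [7, 0]].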
class __lowercase (Dataset ):
"""simple docstring"""
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        """simple docstring"""
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + '.source' )
        self.tgt_file = Path(data_dir ).joinpath(type_path + '.target' )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self ):
"""simple docstring"""
return len(self.src_lens )
    def __getitem__( self , index ):
        """simple docstring"""
        index = index + 1 # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip('\n' )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip('\n' )
        assert source_line, F'''empty source line for index {index}'''
        assert tgt_line, F'''empty tgt line for index {index}'''
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , 'right' )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , 'right' )
        source_ids = source_inputs['input_ids'].squeeze()
        target_ids = target_inputs['input_ids'].squeeze()
        src_mask = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
    def get_char_lens( data_file ):
        """simple docstring"""
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ):
        """simple docstring"""
        input_ids = torch.stack([x['input_ids'] for x in batch] )
        masks = torch.stack([x['attention_mask'] for x in batch] )
        target_ids = torch.stack([x['decoder_input_ids'] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
logger = getLogger(__name__)
def flatten_list( summary_ids: List[List] ):
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info( folder_path ):
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, 'git_log.json' ) )
def save_json( content, path, indent=4, **json_dump_kwargs ):
    with open(path, 'w' ) as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs )
def load_json( path ):
    with open(path ) as f:
        return json.load(f )
def get_git_info( ):
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos
def lmap( f: Callable, x: Iterable ):
    return list(map(f, x ) )
def pickle_save( obj, path ):
    with open(path, 'wb' ) as f:
        return pickle.dump(obj, f )
def normalize_answer( s ):
    def remove_articles(text ):
        return re.sub(r'\b(a|an|the)\b', ' ', text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score( prediction, ground_truth ):
    pred_tokens = normalize_answer(prediction ).split()
    gold_tokens = normalize_answer(ground_truth ).split()
    common = Counter(pred_tokens ) & Counter(gold_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_tokens )
    recall = 1.0 * num_same / len(gold_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score( prediction, ground_truth ):
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns, reference_lns ):
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns, reference_lns ):
        em += exact_match_score(hypo, pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model( model_prefix ):
    return model_prefix.startswith('rag' )
def set_extra_model_params( extra_params, hparams, config ):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams, p, None ):
            if not hasattr(config, p ) and not hasattr(config, equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(p ) )
                delattr(hparams, p )
                continue
            set_p = p if hasattr(config, p ) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p ) )
            delattr(hparams, p )
    return hparams, config
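# Quick sanity check of the metric helpers above (hypothetical values, not from the
# original file): normalization lowercases and strips punctuation and articles, so
#   exact_match_score("The Answer!", "answer") is True
#   calculate_exact_match(["The Answer!"], ["answer"]) == {"em": 1.0}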
| 101
| 0
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
A__ : int = {"""facebook/blenderbot-3B""": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode( ):
    '''simple docstring'''
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
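# e.g. the space byte (32) falls outside the printable ranges above, so it is
# remapped to chr(256 + 32) == 'Ġ', the marker GPT-2-style vocabularies show for spaces.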
def get_pairs( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
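# Hypothetical example (not in the original file):
#   get_pairs("hello") -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}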
class _lowercase ( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="replace" , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<mask>" , __UpperCamelCase=False , **__UpperCamelCase , )-> Optional[Any]:
UpperCAmelCase__ : Optional[int] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else bos_token
UpperCAmelCase__ : Optional[Any] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else eos_token
UpperCAmelCase__ : int = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else sep_token
UpperCAmelCase__ : Optional[int] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else cls_token
UpperCAmelCase__ : Tuple = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else unk_token
UpperCAmelCase__ : str = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase__ : Union[str, Any] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
super().__init__(
errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , **__UpperCamelCase , )
with open(__UpperCamelCase , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase__ : Dict = json.load(__UpperCamelCase )
UpperCAmelCase__ : Any = {v: k for k, v in self.encoder.items()}
UpperCAmelCase__ : Optional[int] = errors # how to handle errors in decoding
UpperCAmelCase__ : Dict = bytes_to_unicode()
UpperCAmelCase__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCamelCase , encoding="utf-8" ) as merges_handle:
UpperCAmelCase__ : str = merges_handle.read().split("\n" )[1:-1]
UpperCAmelCase__ : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase__ : str = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
UpperCAmelCase__ : int = {}
UpperCAmelCase__ : str = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase__ : List[Any] = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size( self )-> int:
        return len(self.encoder )
    def get_vocab( self )-> dict:
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token )-> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
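    # Illustrative trace (hypothetical merge table, not from the original file): with
    # self.bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}, bpe("low") rewrites
    # "l o w" -> "lo w" -> "low" and caches the result under the input token.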
    def _tokenize( self , text )-> Any:
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def _convert_token_to_id( self , token )-> int:
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index )-> str:
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens )-> str:
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None )-> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False )-> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None )-> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs )-> Optional[int]:
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None )-> List[int]:
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids( self , conversation: "Conversation" )-> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = " ".join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(F"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
        return input_ids
| 716
|
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration( func ):
    '''simple docstring'''
    def wrapper(*args , **kwargs ):
        starttime = timeit.default_timer()
        _ = func(*args , **kwargs )
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples( features: dict , num_examples=100 , seq_shapes=None ):
    '''simple docstring'''
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples ):
        example = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(v , _ArrayXD ):
                data = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v , datasets.Value ):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
            elif isinstance(v , datasets.Sequence ):
                while isinstance(v , datasets.Sequence ):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape ).astype(v.dtype )
            example[k] = data
        dummy_data.append((i, example) )
    return dummy_data
def generate_example_dataset( dataset_path , features , num_examples=100 , seq_shapes=None ):
    '''simple docstring'''
    dummy_data = generate_examples(features , num_examples=num_examples , seq_shapes=seq_shapes )
    with ArrowWriter(features=features , path=dataset_path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )
        num_final_examples , num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
    dataset = datasets.Dataset.from_file(filename=dataset_path , info=datasets.DatasetInfo(features=features ) )
    return dataset
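# Hypothetical usage sketch (names below are illustrative, not from the original file):
# build a tiny throwaway dataset to benchmark ArrowWriter throughput.
#
#   features = datasets.Features({"text": datasets.Value("string")})
#   dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=10)
#   assert len(dataset) == 10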
| 660
| 0
|
'''simple docstring'''
from typing import Any
def UpperCamelCase ( input_list : list ) -> list[Any]:
    '''simple docstring'''
    if not input_list:
        return []
    counts = [input_list.count(value ) for value in input_list]
    max_count = max(counts ) # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(counts ) if value == max_count} )
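# Hand-checked examples (not from the original file): UpperCamelCase([2, 2, 3, 4])
# returns [2], and a tie such as [1, 1, 2, 2] returns every mode sorted: [1, 2].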
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
SCREAMING_SNAKE_CASE_ = False
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
pass
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def test_inference_image_variations( self ) -> List[Any]:
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        image_prompt = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 597
| 0
|
"""simple docstring"""
def miller_rabin( n , allow_probable = False ) ->bool:
    """simple docstring"""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime." )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds , 1 ):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d , s = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
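# Quick hand-checked examples (not from the original file): miller_rabin(97) is True,
# while 91 == 7 * 13 is correctly rejected by the single base 2 used below the first bound.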
def test_miller_rabin( ) ->None:
"""simple docstring"""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 210
|
"""simple docstring"""
def solution( length = 50 ) ->int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
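# Hand-checked example (not from the original file): solution(5) == 15, the number of
# ways to fill a length-5 row with unit cells plus tiles of length 2, 3 and 4.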
if __name__ == "__main__":
print(F"""{solution() = }""")
| 210
| 1
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = '''facebook/wmt19-en-de'''
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
mname_tiny = '''tiny-wmt19-en-de'''
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 686
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 686
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments :
"""simple docstring"""
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    task_type: Optional[str] = field(
        default='''NER''' , metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    use_fast: bool = field(default=False , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class DataTrainingArguments :
"""simple docstring"""
    data_dir: str = field(
        metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
    labels: Optional[str] = field(
        default=None , metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''} , )
    max_seq_length: int = field(
        default=1_2_8 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def main( ) -> Optional[Any]:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
    module = import_module('''tasks''' )
    try:
        token_classification_task_clazz = getattr(module , model_args.task_type )
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , lowercase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map: Dict[int, str] = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray , label_ids: np.ndarray ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size , seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list , out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
                results.update(result )
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions , label_ids , metrics = trainer.predict(test_dataset )
        preds_list , _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , '''test_results.txt''' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , '''w''' ) as writer:
                for key, value in metrics.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , '''w''' ) as writer:
                with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def lowerCAmelCase_ ( lowercase: Dict ) -> Tuple:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
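# --- Hedged example (added): a standalone sketch of the align_predictions step
# above. The (batch, seq_len, num_labels) logits shape and the toy label_map are
# assumptions for illustration; -100 is nn.CrossEntropyLoss().ignore_index, as
# used in the script.
import numpy as np

def align_predictions_sketch(predictions, label_ids, label_map):
    preds = np.argmax(predictions, axis=2)
    batch_size, seq_len = preds.shape
    preds_list = [[] for _ in range(batch_size)]
    out_label_list = [[] for _ in range(batch_size)]
    for i in range(batch_size):
        for j in range(seq_len):
            if label_ids[i, j] != -100:  # skip padding / special-token positions
                out_label_list[i].append(label_map[label_ids[i][j]])
                preds_list[i].append(label_map[preds[i][j]])
    return preds_list, out_label_list

label_map = {0: "O", 1: "B-PER", 2: "I-PER"}  # hypothetical label map
logits = np.random.rand(2, 4, 3)
labels = np.array([[0, 1, -100, 2], [2, -100, 0, 1]])
preds_list, gold_list = align_predictions_sketch(logits, labels, label_map)
assert len(preds_list[0]) == 3  # the -100 position is dropped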
| 264
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCAmelCase_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __magic_name__ :
"""simple docstring"""
lowerCAmelCase : Optional[str] = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
lowerCAmelCase : Optional[str] = field(
default=__a , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
lowerCAmelCase : Optional[str] = field(
default=__a , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , )
lowerCAmelCase : Optional[str] = field(default=__a , metadata={'''help''': '''A folder containing the training data.'''} )
lowerCAmelCase : Optional[str] = field(default=__a , metadata={'''help''': '''A folder containing the validation data.'''} )
lowerCAmelCase : Optional[float] = field(
default=0.1_5 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
lowerCAmelCase : int = field(default=3_2 , metadata={'''help''': '''The size of the square patches to use for masking.'''} )
lowerCAmelCase : float = field(
default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , )
lowerCAmelCase : Optional[int] = field(
default=__a , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
lowerCAmelCase : Optional[int] = field(
default=__a , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def lowerCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_UpperCamelCase: Dict = {}
if self.train_dir is not None:
_UpperCamelCase: Tuple = self.train_dir
if self.validation_dir is not None:
_UpperCamelCase: Dict = self.validation_dir
_UpperCamelCase: Any = data_files if data_files else None
@dataclass
class __magic_name__ :
"""simple docstring"""
lowerCAmelCase : str = field(
default=__a , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '''
'''checkpoint identifier on the hub. '''
'''Don\'t set if you want to train a model from scratch.'''
)
} , )
lowerCAmelCase : Optional[str] = field(
default=__a , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__a )} , )
lowerCAmelCase : Optional[str] = field(
default=__a , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCAmelCase : Optional[str] = field(
default=__a , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
lowerCAmelCase : Optional[str] = field(
default=__a , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , )
lowerCAmelCase : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
lowerCAmelCase : str = field(default=__a , metadata={'''help''': '''Name or path of preprocessor config.'''} )
lowerCAmelCase : bool = field(
default=__a , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
lowerCAmelCase : Optional[int] = field(
default=__a , metadata={
'''help''': (
'''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'''
)
} , )
lowerCAmelCase : Optional[int] = field(
default=__a , metadata={
'''help''': (
'''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'''
)
} , )
lowerCAmelCase : Optional[int] = field(
default=__a , metadata={'''help''': '''Stride to use for the encoder.'''} , )
class MaskGenerator:
    """Generates the boolean patch mask consumed as `bool_masked_pos` by the model."""

    def __init__(self, input_size: int = 192, mask_patch_size: int = 32, model_patch_size: int = 4, mask_ratio: float = 0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)
        return torch.tensor(mask.flatten())
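# --- Hedged usage example (added) for the MaskGenerator above, using the SimMIM
# defaults from the constructor (192/32/4/0.6): rand_size = 6, scale = 8,
# token_count = 36, mask_count = ceil(36 * 0.6) = 22.
gen = MaskGenerator()
mask = gen()
assert mask.shape == (48 * 48,)  # (rand_size * scale) ** 2 flattened entries
assert int(mask.sum()) == 22 * 8 ** 2  # each masked patch covers scale**2 cells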
def collate_fn(examples):
    """simple docstring"""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase: Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase: List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase: Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mim''' , lowercase , lowercase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCamelCase: Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(lowercase )
transformers.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_UpperCamelCase: Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase: Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
_UpperCamelCase: Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
_UpperCamelCase: Union[str, Any] = None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase ) and data_args.train_val_split > 0.0:
_UpperCamelCase: Any = ds['''train'''].train_test_split(data_args.train_val_split )
_UpperCamelCase: Dict = split['''train''']
_UpperCamelCase: Any = split['''test''']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase: Tuple = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
_UpperCamelCase: Tuple = AutoConfig.from_pretrained(model_args.config_name_or_path , **lowercase )
elif model_args.model_name_or_path:
_UpperCamelCase: str = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase )
else:
_UpperCamelCase: Union[str, Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(lowercase , '''decoder_type''' ):
_UpperCamelCase: str = '''simmim'''
# adapt config
_UpperCamelCase: Tuple = model_args.image_size if model_args.image_size is not None else config.image_size
_UpperCamelCase: Union[str, Any] = model_args.patch_size if model_args.patch_size is not None else config.patch_size
_UpperCamelCase: List[str] = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'''image_size''': model_args.image_size,
'''patch_size''': model_args.patch_size,
'''encoder_stride''': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
_UpperCamelCase: Dict = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase )
elif model_args.model_name_or_path:
_UpperCamelCase: int = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase )
else:
IMAGE_PROCESSOR_TYPES = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
_UpperCamelCase: Tuple = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
_UpperCamelCase: Optional[int] = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
_UpperCamelCase: Any = AutoModelForMaskedImageModeling.from_config(lowercase )
if training_args.do_train:
_UpperCamelCase: Tuple = ds['''train'''].column_names
else:
_UpperCamelCase: Tuple = ds['''validation'''].column_names
if data_args.image_column_name is not None:
_UpperCamelCase: Optional[Any] = data_args.image_column_name
elif "image" in column_names:
_UpperCamelCase: Optional[int] = '''image'''
elif "img" in column_names:
_UpperCamelCase: Optional[Any] = '''img'''
else:
_UpperCamelCase: Union[str, Any] = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
transforms = Compose(
[
Lambda(lambda img : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
mask_generator = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(examples: Dict ):
    examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
    examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
    return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
_UpperCamelCase: Tuple = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
_UpperCamelCase: Union[str, Any] = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowercase )
# Initialize our trainer
_UpperCamelCase: Union[str, Any] = Trainer(
model=lowercase , args=lowercase , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=lowercase , data_collator=lowercase , )
# Training
if training_args.do_train:
_UpperCamelCase: int = None
if training_args.resume_from_checkpoint is not None:
_UpperCamelCase: int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCamelCase: List[str] = last_checkpoint
_UpperCamelCase: int = trainer.train(resume_from_checkpoint=lowercase )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_UpperCamelCase: Any = trainer.evaluate()
trainer.log_metrics('''eval''' , lowercase )
trainer.save_metrics('''eval''' , lowercase )
# Write model card and (optionally) push to hub
_UpperCamelCase: Union[str, Any] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''masked-image-modeling''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-image-modeling'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase )
else:
trainer.create_model_card(**lowercase )
if __name__ == "__main__":
main()
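# --- Hedged example (added): what the collate_fn above produces. The tensor
# sizes (3x192x192 images, 2304-entry masks) are assumptions matching the
# default MaskGenerator settings.
import torch

examples = [{"pixel_values": torch.randn(3, 192, 192), "mask": torch.zeros(2304)} for _ in range(4)]
batch = collate_fn(examples)
assert batch["pixel_values"].shape == (4, 3, 192, 192)
assert batch["bool_masked_pos"].shape == (4, 2304)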
| 264
| 1
|
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__lowercase : List[Any] = tmp_path / '''cache'''
__lowercase : Any = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowercase : Optional[Any] = TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read()
_check_text_dataset(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__lowercase : Union[str, Any] = tmp_path / '''cache'''
__lowercase : Union[str, Any] = {'''text''': '''string'''}
__lowercase : Union[str, Any] = features.copy() if features else default_expected_features
__lowercase : Optional[Any] = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowercase : Optional[int] = TextDatasetReader(__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_text_dataset(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__lowercase : Optional[Any] = tmp_path / '''cache'''
__lowercase : List[str] = {'''text''': '''string'''}
__lowercase : Tuple = TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , split=__UpperCamelCase ).read()
_check_text_dataset(__UpperCamelCase , __UpperCamelCase )
assert dataset.split == (split if split else "train")
@pytest.mark.parametrize('''path_type''' , [str, list] )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if issubclass(__UpperCamelCase , __UpperCamelCase ):
__lowercase : Union[str, Any] = text_path
elif issubclass(__UpperCamelCase , __UpperCamelCase ):
__lowercase : Optional[int] = [text_path]
__lowercase : Optional[Any] = tmp_path / '''cache'''
__lowercase : Dict = {'''text''': '''string'''}
__lowercase : List[Any] = TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_text_dataset(__UpperCamelCase , __UpperCamelCase )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=("train",) ):
assert isinstance(__UpperCamelCase , __UpperCamelCase )
for split in splits:
__lowercase : Any = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__lowercase : Optional[int] = tmp_path / '''cache'''
__lowercase : List[str] = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowercase : Tuple = TextDatasetReader({'''train''': text_path} , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read()
_check_text_datasetdict(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__lowercase : List[Any] = tmp_path / '''cache'''
# the default dtype of the "text" column is string
__lowercase : Tuple = {'''text''': '''string'''}
__lowercase : Optional[Any] = features.copy() if features else default_expected_features
__lowercase : List[Any] = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowercase : int = TextDatasetReader({'''train''': text_path} , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_text_datasetdict(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if split:
__lowercase : Optional[int] = {split: text_path}
else:
__lowercase : Any = '''train'''
__lowercase : Tuple = {'''train''': text_path, '''test''': text_path}
__lowercase : List[Any] = tmp_path / '''cache'''
__lowercase : Any = {'''text''': '''string'''}
__lowercase : Union[str, Any] = TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_text_datasetdict(__UpperCamelCase , __UpperCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
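# --- Hedged example (added): TextDatasetReader is internal to `datasets`; the
# public equivalent exercised by these tests is the "text" builder. The file
# name below is hypothetical.
from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "my_file.txt"})["train"]
print(ds.column_names)  # ["text"] -- one example per line of the file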
| 76
|
"""simple docstring"""
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
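# --- Worked example (added) for longest_increasing_subsequence_length above:
# one longest increasing subsequence of the list below is [2, 3, 7, 8, 10, 13].
assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6
assert longest_increasing_subsequence_length([]) == 0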
| 76
| 1
|
import math
def malus_law ( initial_intensity: float , angle: float ) -> float:
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
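# --- Worked example (added): Malus's law gives I = I0 * cos^2(theta), so a
# polarizer at 60 degrees transmits cos^2(60°) = 25% of the incident intensity.
assert math.isclose(malus_law(100, 60), 25.0)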
| 718
|
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
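# --- Usage example (added) for the keyword cipher above; the keyword "college"
# is an assumed illustration.
cmap = create_cipher_map("college")
secret = encipher("attack at dawn", cmap)
assert decipher(secret, cmap) == "ATTACK AT DAWN"  # round-trips, uppercased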
| 446
| 0
|
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher ( function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ):
in_colab =False
in_kaggle =False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
in_kaggle =True
elif "IPython" in sys.modules:
in_colab ='google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
UpperCamelCase_ : str =PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , __lowercase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
UpperCamelCase_ : Any =8
UpperCamelCase_ : List[str] =PrepareForLaunch(__lowercase , distributed_type='TPU' )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(__lowercase , args=__lowercase , nprocs=__lowercase , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*__lowercase )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=__lowercase , master_addr='127.0.0.1' , master_port=__lowercase , mixed_precision=__lowercase ):
UpperCamelCase_ : Any =PrepareForLaunch(__lowercase , distributed_type='MULTI_GPU' )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(__lowercase , args=__lowercase , nprocs=__lowercase , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
UpperCamelCase_ : str ='1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*__lowercase )
def debug_launcher ( function , args=() , num_processes=2 ):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=__lowercase , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
UpperCamelCase_ : Optional[Any] =PrepareForLaunch(__lowercase , debug=__lowercase )
start_processes(__lowercase , args=__lowercase , nprocs=__lowercase , start_method='fork' )
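# --- Hedged usage sketch (added): the first function above corresponds to
# accelerate's public notebook_launcher. The training function below is
# hypothetical; the launch call is commented out because spawning requires a
# live notebook/session context.
from accelerate import notebook_launcher  # public entry point for the code above

def training_function():
    print("running on each process")  # hypothetical training body

# notebook_launcher(training_function, args=(), num_processes=2)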
| 357
|
"""simple docstring"""
from collections.abc import Callable
class a__ :
def __init__(self, key: Callable | None = None):
    '''simple docstring'''
    self.arr: list = []
    # Stores indexes of each item for supporting updates and deletion.
    self.pos_map: dict = {}
    # Stores current size of heap.
    self.size = 0
    # Stores function used to evaluate the score of an item on which basis ordering
    # will be done.
    self.key = key or (lambda x: x)
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :int ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : List[str] =int(2 * i + 1 )
return left if 0 < left < self.size else None
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =int(2 * i + 2 )
return right if 0 < right < self.size else None
def lowerCamelCase_ ( self :Dict , _lowerCamelCase :int , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ , UpperCamelCase_ : Optional[int] =(
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
UpperCamelCase_ , UpperCamelCase_ : Union[str, Any] =self.arr[j], self.arr[i]
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :int , _lowerCamelCase :int ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def lowerCamelCase_ ( self :Any , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : int =self._left(_lowerCamelCase )
UpperCamelCase_ : List[Any] =self._right(_lowerCamelCase )
UpperCamelCase_ : Optional[Any] =i
if left is not None and not self._cmp(_lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_ : Optional[int] =left
if right is not None and not self._cmp(_lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_ : List[Any] =right
return valid_parent
def lowerCamelCase_ ( self :Any , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : Dict =self._parent(_lowerCamelCase )
while parent is not None and not self._cmp(_lowerCamelCase , _lowerCamelCase ):
self._swap(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_ , UpperCamelCase_ : Dict =parent, self._parent(_lowerCamelCase )
def lowerCamelCase_ ( self :List[str] , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =self._get_valid_parent(_lowerCamelCase )
while valid_parent != index:
self._swap(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_ , UpperCamelCase_ : int =valid_parent, self._get_valid_parent(_lowerCamelCase )
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :int , _lowerCamelCase :int ):
'''simple docstring'''
if item not in self.pos_map:
return
UpperCamelCase_ : List[Any] =self.pos_map[item]
UpperCamelCase_ : int =[item, self.key(_lowerCamelCase )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_lowerCamelCase )
self._heapify_down(_lowerCamelCase )
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :int ):
'''simple docstring'''
if item not in self.pos_map:
return
UpperCamelCase_ : Any =self.pos_map[item]
del self.pos_map[item]
UpperCamelCase_ : Dict =self.arr[self.size - 1]
UpperCamelCase_ : Optional[int] =index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(_lowerCamelCase )
self._heapify_down(_lowerCamelCase )
def lowerCamelCase_ ( self :Optional[int] , _lowerCamelCase :int , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] =len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_lowerCamelCase )] )
else:
UpperCamelCase_ : str =[item, self.key(_lowerCamelCase )]
UpperCamelCase_ : Optional[int] =self.size
self.size += 1
self._heapify_up(self.size - 1 )
def lowerCamelCase_ ( self :List[Any] ):
'''simple docstring'''
return self.arr[0] if self.size else None
def lowerCamelCase_ ( self :Tuple ):
'''simple docstring'''
UpperCamelCase_ : int =self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def A_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
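# --- Hedged equivalent sketch (added): the class above is a keyed min-heap
# with position tracking (its method names are mangled in this dump). The core
# ordering behavior can be illustrated with the stdlib heapq module:
import heapq

pq = [(3, "task-a"), (1, "task-b"), (2, "task-c")]  # (key, item) pairs
heapq.heapify(pq)
assert heapq.heappop(pq) == (1, "task-b")  # smallest key is popped first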
| 357
| 1
|
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f'Resistor at index {index} has a negative or zero value!'
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f'Resistor at index {index} has a negative value!'
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
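# --- Worked example (added): 4 Ω and 6 Ω combine to
# 1 / (1/4 + 1/6) = 12/5 = 2.4 Ω in parallel and 10 Ω in series.
import math

assert math.isclose(resistor_parallel([4, 6]), 2.4)
assert resistor_series([4, 6]) == 10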
| 701
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@property
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
A__ : Optional[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : List[Any] = self.dummy_uncond_unet
A__ : Dict = ScoreSdeVeScheduler()
A__ : str = ScoreSdeVePipeline(unet=snake_case , scheduler=snake_case )
sde_ve.to(snake_case )
sde_ve.set_progress_bar_config(disable=snake_case )
A__ : Optional[int] = torch.manual_seed(0 )
A__ : Any = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=snake_case ).images
A__ : List[str] = torch.manual_seed(0 )
A__ : int = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=snake_case , return_dict=snake_case )[
0
]
A__ : int = image[0, -3:, -3:, -1]
A__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ : Optional[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Union[str, Any] = """google/ncsnpp-church-256"""
A__ : Tuple = UNetaDModel.from_pretrained(snake_case )
A__ : int = ScoreSdeVeScheduler.from_pretrained(snake_case )
A__ : List[Any] = ScoreSdeVePipeline(unet=snake_case , scheduler=snake_case )
sde_ve.to(snake_case )
sde_ve.set_progress_bar_config(disable=snake_case )
A__ : Dict = torch.manual_seed(0 )
A__ : Optional[int] = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=snake_case ).images
A__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
A__ : Dict = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 498
| 0
|
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
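# --- Hedged example (added): _is_equal_tensor_proto compares initializers with
# their names blanked out, so tensors with identical payloads count as
# duplicates. onnx.numpy_helper builds the test tensors.
import numpy as np
from onnx import numpy_helper

t1 = numpy_helper.from_array(np.zeros((2, 2), dtype=np.float32), name="a")
t2 = numpy_helper.from_array(np.zeros((2, 2), dtype=np.float32), name="b")
assert _is_equal_tensor_proto(t1, t2)  # same payload, different names -> duplicates
# remove_dup_initializers("exported/model.onnx")  # hypothetical path; writes an optimized_ copy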
| 36
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ ( __a , unittest.TestCase ):
_A :Tuple = KandinskyVaaInpaintPipeline
_A :Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
_A :Optional[int] = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
_A :Optional[Any] = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_A :Union[str, Any] = False
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
return 1_00
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
torch.manual_seed(0 )
lowercase = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowercase = UNetaDConditionModel(**snake_case__ )
return model
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
torch.manual_seed(0 )
lowercase = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowercase = self.dummy_unet
lowercase = self.dummy_movq
lowercase = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=snake_case__ , )
lowercase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : List[str] , snake_case__ : Union[str, Any]=0 ):
lowercase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowercase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create init_image
lowercase = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase = Image.fromarray(np.uinta(snake_case__ ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
lowercase = np.ones((64, 64) , dtype=np.floataa )
lowercase = 0
if str(snake_case__ ).startswith("""mps""" ):
lowercase = torch.manual_seed(snake_case__ )
else:
lowercase = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowercase = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowercase = """cpu"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**snake_case__ )
lowercase = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase = pipe(**self.get_dummy_inputs(snake_case__ ) )
lowercase = output.images
lowercase = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
lowercase = np.array(
[0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def SCREAMING_SNAKE_CASE__ ( self : str ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowercase = np.ones((7_68, 7_68) , dtype=np.floataa )
lowercase = 0
lowercase = """a hat"""
lowercase = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
lowercase = KandinskyVaaInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
lowercase = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase , lowercase = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowercase = pipeline(
image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
lowercase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 428
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class CTRLConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(self, vocab_size=246_534, n_positions=256, n_embd=1_280, dff=8_192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1E-6, initializer_range=0.02, use_cache=True, **kwargs) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
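# --- Hedged usage example (added): instantiating the config above with its
# defaults and reading a field through the attribute_map.
config = CTRLConfig()
assert config.n_embd == 1_280
assert config.hidden_size == config.n_embd  # resolved via attribute_map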
| 150
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
lowercase = {
"""configuration_audio_spectrogram_transformer""": [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ASTConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ASTForAudioClassification""",
"""ASTModel""",
"""ASTPreTrainedModel""",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["""ASTFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 150
| 1
|
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    """simple docstring"""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
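# --- Worked example (added): "daBcd" can become "ABC" by capitalizing the 'a'
# and 'c' and deleting the lowercase 'd's; "dBcd" cannot supply the 'A'.
assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABC") is False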
| 104
|
from __future__ import annotations
from collections.abc import Iterator
class Node:
    '''simple docstring'''

    def __init__(self, value: int):
        self.value = value
        self.left = None
        self.right = None


class BinaryTreeNodeSum:
    '''simple docstring'''

    def __init__(self, tree: Node):
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
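# --- Usage example (added) for the classes above: the iterator yields the sum
# of all node values reached by depth-first search.
root = Node(10)
root.left = Node(5)
root.right = Node(-3)
assert next(iter(BinaryTreeNodeSum(root))) == 12  # 10 + 5 - 3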
| 31
| 0
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    """simple docstring"""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('moe_layer.experts.0', f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace('moe_layer.experts.', 'ffn.experts.expert_')
        if "gate" in key:
            key = key.replace('.moe_layer.gate.wg', '.ffn.router.classifier')
        if "fc2" in key and "experts" not in key:
            key = key.replace('.fc2.', '.ffn.fc2.')
        if "fc1" in key and "experts" not in key:
            key = key.replace('.fc1.', '.ffn.fc1.')
        if ".encoder_attn." in key:
            key = key.replace('.encoder_attn.', '.cross_attention.')
        if "encoder_attn_layer_norm" in key:
            key = key.replace('encoder_attn_layer_norm', 'cross_attention_layer_norm')
        if "final_layer_norm" in key:
            key = key.replace('final_layer_norm', 'ff_layer_norm')
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    """simple docstring"""
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)['model']
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace('.bin', f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype)
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace('.bin', f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + '-shared.pt')['model']
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights['shared.weight'] = shared_weights['decoder.embed_tokens.weight']
    sharded_state_dicts.append(shared_weights.keys())
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace('.bin', f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace('.bin', f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), 'w', encoding='utf-8') as f:
        content = json.dumps(index, indent=2, sort_keys=True) + '\n'
        f.write(content)
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )
    config = NllbMoeConfig.from_pretrained(
        'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print('Done')
    model.save_pretrained(args.pytorch_dump_folder_path)
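    # Note (assumption): the full script imports `dtype_byte_size` from
    # transformers.modeling_utils; it returns the storage size in bytes of one element
    # of a dtype. A minimal sketch of the idea, not the library implementation:
    #
    #     import re
    #     def dtype_byte_size(dtype):
    #         if dtype == torch.bool:
    #             return 1 / 8
    #         bit_size = int(re.search(r"[^\d](\d+)$", str(dtype)).group(1))
    #         return bit_size // 8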
| 433
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 433
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
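# Usage sketch: the default conv_stride is (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
# so the feature encoder downsamples the waveform time axis by 5 * 2**6 = 320:
#
#     config = SEWDConfig()
#     assert config.inputs_to_logits_ratio == 320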
| 40
|
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 423
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
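# Usage sketch (illustrative): the property above supplies the dynamic-axes mapping
# consumed during ONNX export:
#
#     onnx_config = XLMOnnxConfig(XLMConfig(), task="default")
#     print(onnx_config.inputs)
#     # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])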
| 185
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 185
| 1
|
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
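# The helper above checkpoints the scheduler halfway through and reloads it; a correct
# scheduler must then produce the same learning-rate trajectory as an uninterrupted run.
# Minimal illustration (any transformers scheduler works the same way):
#
#     scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=4)
#     lrs = unwrap_and_save_reload_schedule(scheduler, num_steps=10)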
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [1_0.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 8.7_5, 7.5, 6.2_5, 5.0, 3.7_5, 2.5, 1.2_5],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 9.6_1, 8.5_3, 6.9_1, 5.0, 3.0_8, 1.4_6, 0.3_8],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 1_0.0, 8.5_3, 5.0, 1.4_6, 1_0.0, 8.5_3, 5.0, 1.4_6],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1e-7},
[0.0, 5.0, 1_0.0, 7.6_5_6, 5.6_2_5, 3.9_0_6, 2.5, 1.4_0_6, 0.6_2_5, 0.1_5_6],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 1_0.0, 8.1_6_5, 7.0_7_1, 6.3_2_5, 5.7_7_4, 5.3_4_5, 5.0, 4.7_1_4],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Wraps a LambdaLR lr_lambda in a picklable callable object."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
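# Why wrapping matters: torch's LambdaLR only serializes its lr_lambdas in state_dict()
# when they are callable objects rather than plain functions or lambdas, so wrapping
# each lambda exercises the save/reload path checked by unwrap_and_save_reload_schedule.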
| 57
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 57
| 1
|
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""
_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=[\"About 95 species are currently accepted .\"]
>>> predictions=[\"About 95 you now get in .\"]
>>> references=[[\"About 95 species are currently known .\"]]
>>> wiki_split = datasets.load_metric(\"wiki_split\")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
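# Worked example: with predictions=["About 95 you now get in ."] and
# references=[["About 95 species are currently known ."]], the normalized strings
# still differ, compute_exact returns 0 for the only pair, and compute_em
# returns 0.0 — matching the module docstring example above.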
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
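# Micro-example (unigrams, one reference, numref=1):
#   SARIngram(["a", "b"], ["a", "c"], [["a", "d"]], 1)
# treats "a" as a good keep (source, prediction and reference all have it), "b" as a
# good deletion (the reference also dropped it), and "c" as an addition the reference
# does not support, so the add component scores 0 for that n-gram.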
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
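# Example (illustrative): normalize("About 95 species are currently accepted.")
# lowercases the sentence and applies sacrebleu's "13a" tokenizer, producing
# "about 95 species are currently accepted ." with the final period split off.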
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions, references, smooth_method="exp", smooth_value=None, force=False, lowercase=False, use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions, transformed_references, smooth_method=smooth_method, smooth_value=smooth_value, force=force, lowercase=lowercase, use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=[
"""https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
"""https://github.com/cocoxu/simplification/blob/master/SARI.py""",
"""https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
"""https://github.com/mjpost/sacreBLEU""",
] , reference_urls=[
"""https://www.aclweb.org/anthology/Q16-1029.pdf""",
"""https://github.com/mjpost/sacreBLEU""",
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 704
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images from random uint8 arrays in channels-first layout."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = ChineseCLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
UpperCAmelCase_ = """Alexandra,T-shirt的价格是15便士。"""
UpperCAmelCase_ = processor(text=__lowercase )
UpperCAmelCase_ = tokenizer(__lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = ChineseCLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
UpperCAmelCase_ = """Alexandra,T-shirt的价格是15便士。"""
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=__lowercase , images=__lowercase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = ChineseCLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
UpperCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ = processor.batch_decode(__lowercase )
UpperCAmelCase_ = tokenizer.batch_decode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = ChineseCLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase )
UpperCAmelCase_ = """Alexandra,T-shirt的价格是15便士。"""
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=__lowercase , images=__lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 486
| 0
|
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement the hyperbolic tangent, tanh(x) = (2 / (1 + e^(-2x))) - 1, element-wise."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
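    # Quick illustrative check (added example, not part of the original module):
    print(tangent_hyperbolic(np.array([1.0, 5.0, -3.0])))  # approx [0.76159  0.99991 -0.99505]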
| 202
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"
    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 370
| 0
|
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
        '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''',
        FutureWarning,
    )
| 709
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs,
        )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
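# Minimal usage sketch (illustrative; assumes a local "data.txt" exists):
#
#   dataset = TextDatasetReader("data.txt", keep_in_memory=True).read()
#   print(dataset[0]["text"])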
| 515
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" SqueezeBERT tokenizer, backed by HuggingFace's *tokenizers* library."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
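# Minimal usage sketch (illustrative; downloads pretrained tokenizer files from the Hub):
#
#   tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   print(tokenizer("Hello world")["input_ids"])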
| 658
|
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal string to its binary representation as an int."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError('''No value was passed to the function''')
    is_negative = hex_num[0] == '''-'''
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError('''Invalid value was passed to the function''')
    bin_str = ''''''
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(('''-''' + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
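    # Quick illustrative check (added example, not part of the original module):
    print(hex_to_bin("AC"))     # 10101100
    print(hex_to_bin("-fFfF"))  # -1111111111111111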
| 658
| 1
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv('sample_data.csv', header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='mean_squared_error', optimizer='adam')
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
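    # Quick shape sanity check (added example, not part of the original script):
    # each row of `pred` holds `forward_days` predicted values for one input window.
    print(pred.shape, y_test.shape)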
| 186
|
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTex.\n    references: list of reference for each prediction. Each\n        reference is a string that contains natural language\n        and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string'''),
'''references''': datasets.Value('''string'''),
}) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute(self, references, predictions):
        """Returns the accuracy after canonicalizing predictions and references."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 186
| 1
|
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition `data` into less-than, equal and greater-than lists relative to `pivot`."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    """Return the element that would sit at position `index` if `items` were sorted."""
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
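if __name__ == "__main__":
    # Illustrative usage (added example, not part of the original module):
    # quick_select returns the element at position `index` in sorted order.
    print(quick_select([2, 4, 5, 7, 899, 54, 32], 5))  # 54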
| 157
|
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("""Either arr or size must be specified""")
    def init(self, arr):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array(self):
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_(index):
        return index + (index & (-index))
    @staticmethod
    def prev(index):
        return index - (index & (-index))
    def add(self, index, value):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)
    def update(self, index, value):
        self.add(index, value - self.get(index))
    def prefix(self, right):
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result
    def query(self, left, right):
        return self.prefix(right) - self.prefix(left)
    def get(self, index):
        return self.query(index, index + 1)
    def rank_query(self, value):
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
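    # Illustrative usage (added example, not part of the original module):
    f = FenwickTree([1, 2, 3, 4, 5])
    print(f.prefix(3))    # 1 + 2 + 3 = 6
    print(f.query(1, 4))  # 2 + 3 + 4 = 9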
| 157
| 1
|
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
args = parser.parse_args()
device = """cpu"""
prompt = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"""
model_id = """path-to-your-trained-model"""
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"""generator""": generator}
if args.steps is not None:
    generate_kwargs["""num_inference_steps"""] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""")
| 600
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    """Configuration class to store the configuration of an UperNet semantic segmentation model."""
    model_type = "upernet"
    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
            backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("""model_type""")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
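# Minimal usage sketch (illustrative, not part of the original module):
#
#   config = UperNetConfig()  # falls back to the default ResNet backbone
#   print(config.model_type, config.backbone_config.model_type)  # upernet resnet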
| 600
| 1
|
'''simple docstring'''
def binary_recursive(decimal: int) -> str:
    '''Convert a non-negative integer to its binary representation, recursively.'''
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)
def main(number: str) -> str:
    '''Validate the input string and return its binary representation prefixed with 0b.'''
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"""{negative}0b{binary_recursive(int(number))}"""
if __name__ == "__main__":
from doctest import testmod
testmod()
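    # Quick illustrative check (added example, not part of the original module):
    print(main("7"))    # 0b111
    print(main("-11"))  # -0b1011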
| 48
|
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)
def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Beforce calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cell before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway
def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
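    # Illustrative usage (added example, not part of the original module):
    # build a 20-cell highway with a car every 4 cells at speed 2, then run 2 steps.
    highway = construct_highway(20, frequency=4, initial_speed=2)
    print(simulate(highway, number_of_update=2, probability=0.0, max_speed=5))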
| 643
| 0
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single line, optionally padding it to max_length."""
    extra_kw = {"""add_prefix_space""": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(""" """) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="""max_length""" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + """.source""")
        self.tgt_file = Path(data_dir).joinpath(type_path + """.target""")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"""found empty line in {self.src_file}"""
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__(self):
        return len(self.src_lens)
    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("""\n""")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("""\n""")
        assert source_line, f"""empty source line for index {index}"""
        assert tgt_line, f"""empty tgt line for index {index}"""
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, """right""")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, """right""")
        source_ids = source_inputs["""input_ids"""].squeeze()
        target_ids = target_inputs["""input_ids"""].squeeze()
        src_mask = source_inputs["""attention_mask"""].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]
    def collate_fn(self, batch):
        input_ids = torch.stack([x["""input_ids"""] for x in batch])
        masks = torch.stack([x["""attention_mask"""] for x in batch])
        target_ids = torch.stack([x["""decoder_input_ids"""] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, """git_log.json"""))
def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, """w""") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        """repo_id""": str(repo),
        """repo_sha""": str(repo.head.object.hexsha),
        """repo_branch""": str(repo.active_branch),
        """hostname""": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))
def pickle_save(obj, path):
    """Pickle `obj` to the file at `path`."""
    with open(path, """wb""") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return re.sub(r"""\b(a|an|the)\b""", """ """, text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("""rag""")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["""dropout"""] = """dropout_rate"""
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("""config doesn't have a `{}` attribute""".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
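# Quick illustrative check of the QA metrics above (added example, not part of the original module):
#
#   f1_score("The cat sat", "the cat sat.")  # 1.0 after normalization
#   calculate_exact_match(["a b"], ["a b"])  # {"em": 1.0}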
| 403
|
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve a 2x2 linear system [a, b, c] meaning ax + by = c, using Cramer's rule."""
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("""Please enter a valid equation.""")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("""Both a & b of two equations can't be zero.""")
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("""Infinite solutions. (Consistent system)""")
        else:
            raise ValueError("""No solution. (Inconsistent system)""")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
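if __name__ == "__main__":
    # Illustrative usage (added example, not part of the original module):
    # solves 2x + 3y = 6 and 5x + 8y = 12.
    print(cramers_rule_2x2([2, 3, 6], [5, 8, 12]))  # (12.0, -6.0)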
| 403
| 1
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''encoder_hidden_states''': encoder_hidden_states,
            '''encoder_attention_mask''': encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip('''Protein models do not support embedding resizing.''')
    def test_resize_token_embeddings(self):
        pass
    @unittest.skip('''Protein models do not support embedding resizing.''')
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))
    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''')
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 10
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype('|b1'),
    np.dtype('|u1'),
    np.dtype('<u2'),
    np.dtype('>u2'),
    np.dtype('<i2'),
    np.dtype('>i2'),
    np.dtype('<u4'),
    np.dtype('>u4'),
    np.dtype('<i4'),
    np.dtype('>i4'),
    np.dtype('<f4'),
    np.dtype('>f4'),
    np.dtype('<f8'),
    np.dtype('>f8'),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
    _type: str = field(default='Image', init=False, repr=False)
    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support encoding images, please install \'Pillow\'.''')
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('''path''')}
        elif value.get('''bytes''') is not None or value.get('''path''') is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
        else:
            raise ValueError(
                f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""")
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''')
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support decoding images, please install \'Pillow\'.''')
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value['''path'''], value['''bytes''']
        if bytes_ is None:
            if path is None:
                raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split('''::''')[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)['''repo_id''']
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, '''rb''', use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten it into a dictionary."""
        from .features import Value
        return (
            self
            if self.decode
            else {
                "bytes": Value('''binary'''),
                "path": Value('''string'''),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ['''bytes''', '''path'''], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ['''bytes''', '''path'''], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('''bytes''') >= 0:
                bytes_array = storage.field('''bytes''')
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index('''path''') >= 0:
                path_array = storage.field('''path''')
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ['''bytes''', '''path'''], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))['''bytes'''] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ['''bytes''', '''path'''], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, '''rb''') as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field('''path''').to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ['''bytes''', '''path'''], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    """Return the image formats Pillow can both open and save."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''')
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, else PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, '''filename''') and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''')
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('''|u1''')
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""")
        if dtype is not dest_dtype:
            warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""")
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    """Encode a list of objects into image dicts with "path" and "bytes" keys."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''')
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
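# Minimal usage sketch (illustrative, not part of the original module; requires Pillow):
#
#   feature = Image()
#   feature.encode_example("path/to/img.png")  # {"path": "path/to/img.png", "bytes": None}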
| 98
| 0
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone ( PreTrainedModel , BackboneMixin ):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__( self , config , **kwargs ) -> None:
        requires_backends(self , "timm" )
        super().__init__(config )
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
        if config.backbone not in timm.list_models():
            raise ValueError(F"backbone {config.backbone} is not supported by timm." )
        if hasattr(config , "out_features" ) and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
        pretrained = getattr(config , "use_pretrained_backbone" , None )
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config , "out_indices" , None ) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone , pretrained=pretrained , features_only=config.features_only , in_chans=config.num_channels , out_indices=out_indices , **kwargs , )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(config )

    @classmethod
    def from_pretrained ( cls , pretrained_model_name_or_path , *model_args , **kwargs ):
        requires_backends(cls , ["vision", "timm"] )
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop("config" , TimmBackboneConfig() )
        use_timm = kwargs.pop("use_timm_backbone" , True )
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones" )
        num_channels = kwargs.pop("num_channels" , config.num_channels )
        features_only = kwargs.pop("features_only" , config.features_only )
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
        out_indices = kwargs.pop("out_indices" , config.out_indices )
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path , num_channels=num_channels , features_only=features_only , use_pretrained_backbone=use_pretrained_backbone , out_indices=out_indices , )
        return super()._from_config(config , **kwargs )

    def _init_weights ( self , module ) -> None:
        # timm initialises its own weights, so this hook is intentionally a no-op.
        pass

    def forward ( self , pixel_values , output_attentions=None , output_hidden_states=None , return_dict=None , **kwargs ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment" )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values , **kwargs )
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices )
        else:
            feature_maps = self._backbone(pixel_values , **kwargs )
            hidden_states = None
        feature_maps = tuple(feature_maps )
        hidden_states = tuple(hidden_states ) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps , hidden_states=hidden_states , attentions=None )
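# --- Added hedged usage sketch (not part of the original file) ---
# Builds the backbone above from a config and runs one forward pass; "resnet18"
# is an arbitrary choice from timm.list_models().
def _timm_backbone_usage_sketch():
    import torch

    config = TimmBackboneConfig(backbone="resnet18" , use_pretrained_backbone=False )
    backbone = TimmBackbone(config )
    pixel_values = torch.randn(1 , 3 , 224 , 224 )
    outputs = backbone(pixel_values )
    return [tuple(fm.shape ) for fm in outputs.feature_maps]  # one map per out_index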
| 647
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowercase ( unittest.TestCase ):
    def setUp ( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )

    @require_multi_gpu
    def test_multi_gpu ( self ):
        print(F"Found {torch.cuda.device_count()} devices." )
        cmd = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )

    @require_multi_gpu
    def test_multi_gpu_ops ( self ):
        print(F"Found {torch.cuda.device_count()} devices." )
        cmd = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(F"Command: {cmd}" )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )

    @require_multi_gpu
    def test_pad_across_processes ( self ):
        # Runs this very file under torchrun; the __main__ block below does the checks.
        cmd = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )

    @require_multi_gpu
    def test_distributed_data_loop ( self ):
        print(F"Found {torch.cuda.device_count()} devices, using 2 devices only" )
        cmd = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
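# --- Added single-process sketch (illustrative) of the semantics checked above:
# pad_across_processes zero-pads each rank's tensor along dim 0 (on the right,
# or on the left with pad_first=True) up to the largest dim-0 size across ranks.
def _pad_to_sketch(tensor , max_len , pad_first=False ):
    pad = torch.zeros(max_len - tensor.shape[0] , *tensor.shape[1:] , dtype=tensor.dtype , device=tensor.device )
    return torch.cat((pad, tensor) if pad_first else (tensor, pad) , dim=0 )
# e.g. _pad_to_sketch(torch.ones(3, 10, dtype=torch.long), 5).shape == torch.Size([5, 10])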
| 647
| 1
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class SegformerFeatureExtractor ( SegformerImageProcessor ):
    '''Deprecated alias of SegformerImageProcessor.'''

    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            """The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use SegformerImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 455
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class A_(unittest.TestCase ):
"""simple docstring"""
    def setUp ( self ):
        self.checkpoint = 'laion/clap-htsat-unfused'
        self.tmpdirname = tempfile.mkdtemp()
    def get_tokenizer ( self , **A ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **A )
    def get_feature_extractor ( self , **A ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **A )
    def tearDown ( self ):
shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default ( self ):
_lowerCamelCase : Optional[Any] = self.get_tokenizer()
_lowerCamelCase : Union[str, Any] = self.get_feature_extractor()
_lowerCamelCase : List[str] = ClapProcessor(tokenizer=A , feature_extractor=A )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase : str = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A )
    def test_save_load_pretrained_additional_features ( self ):
_lowerCamelCase : int = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCamelCase : str = self.get_feature_extractor(do_normalize=A , padding_value=1.0 )
_lowerCamelCase : List[Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A )
    def test_feature_extractor ( self ):
_lowerCamelCase : Optional[int] = self.get_feature_extractor()
_lowerCamelCase : Optional[int] = self.get_tokenizer()
_lowerCamelCase : Any = ClapProcessor(tokenizer=A , feature_extractor=A )
_lowerCamelCase : int = floats_list((3, 1000) )
_lowerCamelCase : str = feature_extractor(A , return_tensors='np' )
_lowerCamelCase : List[str] = processor(audios=A , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer ( self ):
_lowerCamelCase : Any = self.get_feature_extractor()
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : List[Any] = ClapProcessor(tokenizer=A , feature_extractor=A )
_lowerCamelCase : List[str] = 'This is a test string'
_lowerCamelCase : Any = processor(text=A )
_lowerCamelCase : Optional[int] = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_tokenizer_decode ( self ):
_lowerCamelCase : int = self.get_feature_extractor()
_lowerCamelCase : Union[str, Any] = self.get_tokenizer()
_lowerCamelCase : Optional[int] = ClapProcessor(tokenizer=A , feature_extractor=A )
_lowerCamelCase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : int = processor.batch_decode(A )
_lowerCamelCase : int = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
    def test_model_input_names ( self ):
_lowerCamelCase : Union[str, Any] = self.get_feature_extractor()
_lowerCamelCase : Optional[Any] = self.get_tokenizer()
_lowerCamelCase : int = ClapProcessor(tokenizer=A , feature_extractor=A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
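# --- Added hedged usage sketch (not part of the test file; needs network
# access for from_pretrained) ---
# The processor forwards `text` to the tokenizer and `audios` to the feature
# extractor, so a combined call returns both sets of model inputs.
def _clap_processor_usage_sketch():
    import numpy as np

    processor = ClapProcessor.from_pretrained('laion/clap-htsat-unfused' )
    inputs = processor(text=['a dog barking'] , audios=[np.zeros(48_000 )] , return_tensors='np' )
    return sorted(inputs.keys() )  # tokenizer outputs (input_ids, ...) plus audio input_features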
| 437
| 0
|
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = tmp_path / "cache"
lowerCamelCase = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase = TextDatasetReader(UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ ).read()
_check_text_dataset(UpperCAmelCase__ , UpperCAmelCase__ )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = tmp_path / "cache"
lowerCamelCase = {"text": "string"}
lowerCamelCase = features.copy() if features else default_expected_features
lowerCamelCase = (
Features({feature: Value(UpperCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase = TextDatasetReader(UpperCAmelCase__ , features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ ).read()
_check_text_dataset(UpperCAmelCase__ , UpperCAmelCase__ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = tmp_path / "cache"
lowerCamelCase = {"text": "string"}
lowerCamelCase = TextDatasetReader(UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , split=UpperCAmelCase__ ).read()
_check_text_dataset(UpperCAmelCase__ , UpperCAmelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
if issubclass(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCamelCase = text_path
elif issubclass(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCamelCase = [text_path]
lowerCamelCase = tmp_path / "cache"
lowerCamelCase = {"text": "string"}
lowerCamelCase = TextDatasetReader(UpperCAmelCase__ , cache_dir=UpperCAmelCase__ ).read()
_check_text_dataset(UpperCAmelCase__ , UpperCAmelCase__ )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=("train",) ):
"""simple docstring"""
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
for split in splits:
lowerCamelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = tmp_path / "cache"
lowerCamelCase = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase = TextDatasetReader({"train": text_path} , cache_dir=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ ).read()
_check_text_datasetdict(UpperCAmelCase__ , UpperCAmelCase__ )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = tmp_path / "cache"
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
lowerCamelCase = {"text": "string"}
lowerCamelCase = features.copy() if features else default_expected_features
lowerCamelCase = (
Features({feature: Value(UpperCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase = TextDatasetReader({"train": text_path} , features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ ).read()
_check_text_datasetdict(UpperCAmelCase__ , UpperCAmelCase__ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
if split:
lowerCamelCase = {split: text_path}
else:
lowerCamelCase = "train"
lowerCamelCase = {"train": text_path, "test": text_path}
lowerCamelCase = tmp_path / "cache"
lowerCamelCase = {"text": "string"}
lowerCamelCase = TextDatasetReader(UpperCAmelCase__ , cache_dir=UpperCAmelCase__ ).read()
_check_text_datasetdict(UpperCAmelCase__ , UpperCAmelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
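# --- Added hedged sketch (not part of the test file) ---
# What the `text_path` fixture exercises end to end: the text loader yields a
# single "text" column with one row per line of the input file.
def _text_reader_sketch(tmp_path ):
    from datasets import load_dataset  # high-level wrapper over TextDatasetReader

    sample = tmp_path / "sample.txt"
    sample.write_text("foo\nbar\nfoobar\n" )
    dataset = load_dataset("text" , data_files=str(sample ) , split="train" )
    assert dataset.column_names == ["text"] and dataset.num_rows == 3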
| 710
|
from math import pi
def arc_length( angle : int , radius : int ) -> float:
    """Return the length of a circular arc: 2 * pi * radius * (angle / 360)."""
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
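# Added worked check (illustrative): a 90-degree angle sweeps a quarter of the
# circumference, so arc_length(90, 10) == 2 * pi * 10 / 4 == 5 * pi (~15.708).
assert abs(arc_length(90 , 10 ) - 5 * pi ) < 1e-9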
| 484
| 0
|
'''simple docstring'''
def compute_ap(l ):  # noqa: E741
    '''Find and print the articulation points of an undirected graph given as an adjacency list.'''
    n = len(l )
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count

    for i in range(n ):
        if not visited[i]:
            out_edge_count = dfs(i , i , -1 , 0 )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
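# Added note: for the adjacency list above the run prints 2, 3 and 5 --
# removing 2 separates {0, 1} from the rest, removing 3 isolates 4, and
# removing 5 cuts {6, 7, 8} off from the rest of the graph.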
| 72
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_UpperCAmelCase : Any = logging.get_logger(__name__)
class YolosFeatureExtractor ( YolosImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 72
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests (PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = DanceDiffusionPipeline
__UpperCAmelCase : Dict = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__UpperCAmelCase : int = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
__UpperCAmelCase : Dict = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : str = False
def __snake_case ( self : str ) -> str:
torch.manual_seed(0 )
        __snake_case : Tuple = UNet1DModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowerCamelCase , use_timestep_embedding=lowerCamelCase , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
__snake_case : Optional[int] = IPNDMScheduler()
__snake_case : str = {
"unet": unet,
"scheduler": scheduler,
}
return components
def __snake_case ( self : List[str] , lowerCamelCase : Dict , lowerCamelCase : int=0 ) -> Optional[Any]:
if str(lowerCamelCase ).startswith("mps" ):
__snake_case : str = torch.manual_seed(lowerCamelCase )
else:
__snake_case : Union[str, Any] = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__snake_case : List[str] = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 4,
}
return inputs
def __snake_case ( self : Union[str, Any] ) -> Tuple:
__snake_case : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case : Dict = self.get_dummy_components()
__snake_case : Tuple = DanceDiffusionPipeline(**lowerCamelCase )
__snake_case : List[Any] = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__snake_case : Union[str, Any] = self.get_dummy_inputs(lowerCamelCase )
__snake_case : str = pipe(**lowerCamelCase )
__snake_case : List[Any] = output.audios
__snake_case : Dict = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
__snake_case : Dict = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __snake_case ( self : Dict ) -> Optional[int]:
return super().test_save_load_local()
@skip_mps
def __snake_case ( self : int ) -> Dict:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __snake_case ( self : Optional[Any] ) -> Optional[Any]:
return super().test_save_load_optional_components()
@skip_mps
def __snake_case ( self : Dict ) -> str:
return super().test_attention_slicing_forward_pass()
def __snake_case ( self : Union[str, Any] ) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class DanceDiffusionPipelineIntegrationTests (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Union[str, Any] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Optional[int] ) -> List[str]:
__snake_case : int = torch_device
__snake_case : Dict = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
__snake_case : Optional[int] = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__snake_case : List[str] = torch.manual_seed(0 )
__snake_case : Tuple = pipe(generator=lowerCamelCase , num_inference_steps=100 , audio_length_in_s=4.0_96 )
__snake_case : Union[str, Any] = output.audios
__snake_case : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__snake_case : Tuple = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __snake_case ( self : int ) -> str:
__snake_case : int = torch_device
        __snake_case : Optional[int] = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.float16 )
__snake_case : Tuple = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__snake_case : List[Any] = torch.manual_seed(0 )
__snake_case : Optional[int] = pipe(generator=lowerCamelCase , num_inference_steps=100 , audio_length_in_s=4.0_96 )
__snake_case : Optional[int] = output.audios
__snake_case : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__snake_case : Optional[int] = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
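# --- Added hedged usage sketch mirroring the slow tests above (weights are
# downloaded on first use; shapes are illustrative, not asserted) ---
def _dance_diffusion_usage_sketch():
    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
    output = pipe(generator=torch.manual_seed(0 ) , num_inference_steps=100 , audio_length_in_s=4.096 )
    return output.audios[0].shape  # (channels, samples), i.e. 2 x pipe.unet.sample_size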
| 203
|
import unittest
from transformers import DonutProcessor
_snake_case : Dict = "naver-clova-ix/donut-base"
class a (unittest.TestCase ):
"""simple docstring"""
    def setUp ( self ) -> None:
        self.processor = DonutProcessor.from_pretrained(_snake_case )

    def test_token2json ( self ) -> None:
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
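# Added note: token2json inverts Donut's XML-like token scheme --
# <s_key>value</s_key> becomes {"key": "value"}, and <sep/>-separated repeated
# groups (the nicknames above) become a list of dicts, as expected_json encodes.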
| 203
| 1
|
from manim import *
class SCREAMING_SNAKE_CASE ( Scene ):
    """simple docstring"""
    def construct ( self ) -> None:
_A = Rectangle(height=0.5 , width=0.5 )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_A = Rectangle(height=0.25 , width=0.25 )
_A = [mem.copy() for i in range(6 )]
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_A = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_A = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_A = Text('''CPU''' , font_size=24 )
_A = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
_A = [mem.copy() for i in range(4 )]
_A = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_A = Text('''GPU''' , font_size=24 )
_A = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase__ )
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_A = Text('''Model''' , font_size=24 )
_A = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase__ )
_A = []
_A = []
for i, rect in enumerate(lowerCamelCase__ ):
_A = fill.copy().set_fill(lowerCamelCase__ , opacity=0.8 )
target.move_to(lowerCamelCase__ )
model_arr.append(lowerCamelCase__ )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCamelCase__ )
self.add(*lowerCamelCase__ , *lowerCamelCase__ )
_A = [meta_mem.copy() for i in range(6 )]
_A = [meta_mem.copy() for i in range(6 )]
_A = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_A = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_A = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_A = Text('''Disk''' , font_size=24 )
_A = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
disk.move_to([-4, -1.25, 0] )
self.add(lowerCamelCase__ , lowerCamelCase__ )
_A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_A = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase__ , lowerCamelCase__ )
_A = MarkupText(
f"""<span fgcolor=\'{BLUE}\'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowerCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase__ )
_A = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ ) )
_A = Square(0.3 )
input.set_fill(lowerCamelCase__ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowerCamelCase__ , buff=0.5 )
self.play(Write(lowerCamelCase__ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowerCamelCase__ , buff=0.02 )
self.play(MoveToTarget(lowerCamelCase__ ) )
self.play(FadeOut(lowerCamelCase__ ) )
_A = Arrow(start=lowerCamelCase__ , end=lowerCamelCase__ , color=lowerCamelCase__ , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowerCamelCase__ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_A = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ , run_time=3 ) )
_A = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
self.play(
Write(lowerCamelCase__ ) , Circumscribe(model_arr[0] , color=lowerCamelCase__ , **lowerCamelCase__ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase__ , **lowerCamelCase__ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase__ , **lowerCamelCase__ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_A = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCamelCase__ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_A = AnimationGroup(
FadeOut(lowerCamelCase__ , run_time=0.5 ) , MoveToTarget(lowerCamelCase__ , run_time=0.5 ) , FadeIn(lowerCamelCase__ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowerCamelCase__ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_A = 0.7
self.play(
Circumscribe(model_arr[i] , **lowerCamelCase__ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase__ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase__ , **lowerCamelCase__ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase__ , **lowerCamelCase__ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase__ , **lowerCamelCase__ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowerCamelCase__ , **lowerCamelCase__ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase__ , **lowerCamelCase__ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase__ , **lowerCamelCase__ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_A = a_c
_A = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(lowerCamelCase__ ) , FadeOut(lowerCamelCase__ , run_time=0.5 ) , )
_A = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ , run_time=3 ) , MoveToTarget(lowerCamelCase__ ) )
self.wait()
| 484
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[Any] = logging.get_logger(__name__)
a : Any = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class OpenAIGPTConfig ( PretrainedConfig ):
    '''Configuration for an OpenAI GPT model; `attribute_map` aliases the canonical names.'''

    model_type = 'openai-gpt'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self , vocab_size=40_478 , n_positions=512 , n_embd=768 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
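# --- Added hedged usage sketch (not part of the original file) ---
# Configs are plain attribute containers: a few kwargs define a smaller model,
# and `attribute_map` exposes the canonical names as aliases.
def _openai_gpt_config_sketch():
    config = OpenAIGPTConfig(n_layer=4 , n_head=4 , n_embd=256 )
    return config.num_hidden_layers, config.hidden_size  # (4, 256) via attribute_map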
| 613
| 0
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCAmelCase( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[Any]= {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
}
config.update(**snake_case__ )
return config
def UpperCAmelCase_ ( self , snake_case__=0 , **snake_case__ ):
'''simple docstring'''
lowercase__ : Tuple= dict(self.forward_default_kwargs )
lowercase__ : str= kwargs.pop("num_inference_steps" , snake_case__ )
lowercase__ : Optional[Any]= self.dummy_sample
lowercase__ : Tuple= 0.1 * sample
lowercase__ : str= [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase__ : List[Any]= self.get_scheduler_config(**snake_case__ )
lowercase__ : List[Any]= scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
lowercase__ : Any= dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
lowercase__ : Union[str, Any]= scheduler_class.from_pretrained(snake_case__ )
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
lowercase__ : int= dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase__, lowercase__ : Optional[Any]= sample, sample
for t in range(snake_case__ , time_step + scheduler.config.solver_order + 1 ):
lowercase__ : str= scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowercase__ : Union[str, Any]= new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self , snake_case__=0 , **snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= dict(self.forward_default_kwargs )
lowercase__ : Dict= kwargs.pop("num_inference_steps" , snake_case__ )
lowercase__ : str= self.dummy_sample
lowercase__ : Tuple= 0.1 * sample
lowercase__ : Optional[Any]= [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase__ : List[Any]= self.get_scheduler_config()
lowercase__ : Dict= scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ : Optional[Any]= dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
lowercase__ : Tuple= scheduler_class.from_pretrained(snake_case__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residual (must be after setting timesteps)
lowercase__ : Dict= dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase__ : Union[str, Any]= scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowercase__ : Union[str, Any]= new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self , snake_case__=None , **snake_case__ ):
'''simple docstring'''
if scheduler is None:
lowercase__ : str= self.scheduler_classes[0]
lowercase__ : int= self.get_scheduler_config(**snake_case__ )
lowercase__ : Dict= scheduler_class(**snake_case__ )
lowercase__ : int= self.scheduler_classes[0]
lowercase__ : List[Any]= self.get_scheduler_config(**snake_case__ )
lowercase__ : List[Any]= scheduler_class(**snake_case__ )
lowercase__ : Any= 10
lowercase__ : Any= self.dummy_model()
lowercase__ : int= self.dummy_sample_deter
scheduler.set_timesteps(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ : List[str]= model(snake_case__ , snake_case__ )
lowercase__ : int= scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
return sample
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= dict(self.forward_default_kwargs )
lowercase__ : int= kwargs.pop("num_inference_steps" , snake_case__ )
for scheduler_class in self.scheduler_classes:
lowercase__ : Dict= self.get_scheduler_config()
lowercase__ : Optional[int]= scheduler_class(**snake_case__ )
lowercase__ : Any= self.dummy_sample
lowercase__ : List[str]= 0.1 * sample
if num_inference_steps is not None and hasattr(snake_case__ , "set_timesteps" ):
scheduler.set_timesteps(snake_case__ )
elif num_inference_steps is not None and not hasattr(snake_case__ , "set_timesteps" ):
lowercase__ : Any= num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ : Any= [residual + 0.2, residual + 0.15, residual + 0.10]
lowercase__ : Union[str, Any]= dummy_past_residuals[: scheduler.config.solver_order]
lowercase__ : int= scheduler.timesteps[5]
lowercase__ : int= scheduler.timesteps[6]
lowercase__ : Union[str, Any]= scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowercase__ : Dict= scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowercase__ : Union[str, Any]= DEISMultistepScheduler(**self.get_scheduler_config() )
lowercase__ : Any= self.full_loop(scheduler=snake_case__ )
lowercase__ : Dict= torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3
lowercase__ : Tuple= DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowercase__ : List[str]= DPMSolverMultistepScheduler.from_config(scheduler.config )
lowercase__ : Optional[int]= UniPCMultistepScheduler.from_config(scheduler.config )
lowercase__ : Optional[int]= DEISMultistepScheduler.from_config(scheduler.config )
lowercase__ : Dict= self.full_loop(scheduler=snake_case__ )
lowercase__ : List[Any]= torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=snake_case__ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , algorithm_type="deis" , solver_order=snake_case__ , solver_type=snake_case__ , )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=snake_case__ , solver_type=snake_case__ , prediction_type=snake_case__ , algorithm_type=snake_case__ , )
lowercase__ : int= self.full_loop(
solver_order=snake_case__ , solver_type=snake_case__ , prediction_type=snake_case__ , algorithm_type=snake_case__ , )
assert not torch.isnan(snake_case__ ).any(), "Samples have nan numbers"
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=snake_case__ )
self.check_over_configs(lower_order_final=snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=snake_case__ , time_step=0 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= self.full_loop()
lowercase__ : Union[str, Any]= torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.full_loop(prediction_type="v_prediction" )
lowercase__ : List[Any]= torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.0_91 ) < 1e-3
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= self.scheduler_classes[0]
lowercase__ : Tuple= self.get_scheduler_config(thresholding=snake_case__ , dynamic_thresholding_ratio=0 )
lowercase__ : Optional[int]= scheduler_class(**snake_case__ )
lowercase__ : Tuple= 10
lowercase__ : Dict= self.dummy_model()
lowercase__ : Optional[Any]= self.dummy_sample_deter.half()
scheduler.set_timesteps(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ : Union[str, Any]= model(snake_case__ , snake_case__ )
lowercase__ : Tuple= scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
        assert sample.dtype == torch.float16
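# --- Added hedged sketch (not part of the test file) ---
# The full_loop pattern above, stripped to its core: set timesteps once, then
# repeatedly feed a predicted residual to scheduler.step(). The zero residual
# stands in for a trained model's output.
def _deis_scheduler_loop_sketch():
    scheduler = DEISMultistepScheduler(num_train_timesteps=1000 )
    sample = torch.randn(1 , 3 , 8 , 8 )
    scheduler.set_timesteps(10 )
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample )
        sample = scheduler.step(residual , t , sample ).prev_sample
    return sample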
| 85
|
"""simple docstring"""
from __future__ import annotations
def longest_subsequence(array: list[int] ) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of `array` (pivot recursion)."""
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
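# Added worked example (illustrative): for [10, 22, 9, 33, 21, 50, 41, 60, 80]
# the pivot recursion returns a longest non-decreasing subsequence, e.g.
# [10, 22, 33, 41, 60, 80].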
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85
| 1
|
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path , rembert_config_file , pytorch_dump_path ) -> None:
    # Initialise the PyTorch model from its JSON config
    config = RemBertConfig.from_json_file(rembert_config_file )
    print("Building PyTorch model from configuration: {}".format(str(config ) ) )
    model = RemBertModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
snake_case__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
snake_case__ : Tuple = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
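# Added example invocation (paths are placeholders):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/model.ckpt \
#       --rembert_config_file /path/to/rembert/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin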
| 408
|
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCAmelCase__ : str = sys.version_info >= (3, 10)
def A ( UpperCamelCase_ : Any=None , UpperCamelCase_ : List[Any]=None ) -> Optional[int]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=UpperCamelCase_ )
@dataclass
class BasicExample :
snake_case__ :int
snake_case__ :float
snake_case__ :str
snake_case__ :bool
@dataclass
class A :
snake_case__ :int = 42
snake_case__ :str = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class WithDefaultBoolExample :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :Optional[bool] = None
class BasicEnum ( Enum ):
    titi = 'titi'
    toto = 'toto'
class MixedTypeEnum ( Enum ):
    titi = 'titi'
    toto = 'toto'
    fourtytwo = 42
@dataclass
class A :
snake_case__ :BasicEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.foo )
@dataclass
class A :
snake_case__ :MixedTypeEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = MixedTypeEnum(self.foo )
@dataclass
class OptionalExample :
snake_case__ :Optional[int] = None
snake_case__ :Optional[float] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :Optional[str] = None
snake_case__ :Optional[List[str]] = list_field(default=[] )
snake_case__ :Optional[List[int]] = list_field(default=[] )
@dataclass
class A :
snake_case__ :List[int] = list_field(default=[] )
snake_case__ :List[int] = list_field(default=[1, 2, 3] )
snake_case__ :List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case__ :List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A :
snake_case__ :List[int] = field()
snake_case__ :str = field()
snake_case__ :BasicEnum = field()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.required_enum )
@dataclass
class A :
snake_case__ :int
snake_case__ :"BasicEnum" = field()
snake_case__ :"Optional[bool]" = None
snake_case__ :"str" = field(default='toto' , metadata={'help': 'help message'} )
snake_case__ :"List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :bool | None = None
@dataclass
class A :
snake_case__ :int | None = None
snake_case__ :float | None = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :str | None = None
snake_case__ :list[str] | None = list_field(default=[] )
snake_case__ :list[int] | None = list_field(default=[] )
class A ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : argparse.ArgumentParser , __magic_name__ : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , __magic_name__ ) and yy.get("choices" , __magic_name__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](__magic_name__ ) , yy["type"](__magic_name__ ) )
del xx["type"], yy["type"]
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--bar" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--baz" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--flag" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((lowerCAmelCase__) ,) = parser.parse_args_into_dataclasses(__magic_name__ , look_for_args_file=__magic_name__ )
self.assertFalse(example.flag )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
expected.add_argument("--baz" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=__magic_name__ , dest="baz" )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
lowerCAmelCase__ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
@dataclass
class A :
snake_case__ :Literal["titi", "toto", 42] = "toto"
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
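
# Illustrative sketch (added; not part of the original test file): the core
# pattern the tests above exercise — HfArgumentParser maps a dataclass to
# argparse options. `DemoArguments` is a hypothetical dataclass used only here.
if __name__ == "__main__":
    from dataclasses import dataclass, field

    from transformers import HfArgumentParser

    @dataclass
    class DemoArguments:
        foo: int = 12
        bar: float = field(default=3.14, metadata={"help": "help message"})

    (demo_args,) = HfArgumentParser(DemoArguments).parse_args_into_dataclasses(args=["--foo", "7"])
    assert demo_args.foo == 7 and abs(demo_args.bar - 3.14) < 1e-9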
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after resizing with a `shortest_edge` size."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
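
# Illustrative sketch (added; not part of the original tests): the
# aspect-ratio-preserving "shortest edge" resize rule that
# `get_expected_values` above mirrors, shown standalone (the processor's
# additional `longest_edge` cap is deliberately ignored here).
def shortest_edge_resize(width, height, shortest_edge=18):
    """Scale (width, height) so the shorter side equals `shortest_edge`."""
    if width < height:
        return shortest_edge, int(shortest_edge * height / width)
    if width > height:
        return int(shortest_edge * width / height), shortest_edge
    return shortest_edge, shortest_edge


assert shortest_edge_resize(400, 200) == (36, 18)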
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of random PIL images (channels-first arrays converted to channels-last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
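
# Illustrative sketch (added; not part of the original tests): the
# channels-first -> channels-last conversion `prepare_image_inputs` above
# relies on, shown standalone.
import numpy as _np

_chw = _np.random.randint(255, size=(3, 30, 400), dtype=_np.uint8)  # channels first
_hwc = _np.moveaxis(_chw, 0, -1)  # channels last, as PIL.Image.fromarray expects
assert _hwc.shape == (30, 400, 3)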
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
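
# Illustrative sketch (added; not part of the original file): instantiating
# the config above and round-tripping it through its serialized dict form.
# Runs locally; no checkpoint download is involved.
if __name__ == "__main__":
    demo_config = LiltConfig(channel_shrink_ratio=2)
    assert demo_config.model_type == "lilt"
    assert LiltConfig.from_dict(demo_config.to_dict()).channel_shrink_ratio == 2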
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
a_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
a_ : Union[str, Any] = self.dummy_cond_unet
a_ : Any = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , )
a_ : List[str] = self.dummy_vae
a_ : List[str] = self.dummy_text_encoder
a_ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
a_ : Union[str, Any] = StableDiffusionPipeline(
unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
a_ : Optional[Any] = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
a_ : Tuple = "A painting of a squirrel eating a burger"
a_ : List[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
a_ : Union[str, Any] = sd_pipe([prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
a_ : List[str] = output.images
a_ : Optional[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
a_ : Dict = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=_SCREAMING_SNAKE_CASE , )[0]
a_ : List[str] = image[0, -3:, -3:, -1]
a_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
a_ : Union[str, Any] = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_pndm(self):
a_ : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
a_ : List[str] = self.dummy_cond_unet
a_ : int = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
a_ : Any = self.dummy_vae
a_ : int = self.dummy_text_encoder
a_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
a_ : str = StableDiffusionPipeline(
unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
a_ : List[str] = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
a_ : Dict = "A painting of a squirrel eating a burger"
a_ : Optional[int] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
a_ : Union[str, Any] = sd_pipe([prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
a_ : Union[str, Any] = output.images
a_ : Union[str, Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
a_ : Optional[int] = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=_SCREAMING_SNAKE_CASE , )[0]
a_ : Any = image[0, -3:, -3:, -1]
a_ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
a_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_no_safety_checker(self):
a_ : Tuple = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=_SCREAMING_SNAKE_CASE )
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert isinstance(pipe.scheduler , _SCREAMING_SNAKE_CASE )
assert pipe.safety_checker is None
a_ : List[Any] = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
a_ : Optional[int] = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def test_stable_diffusion_fp16(self):
a_ : Tuple = self.dummy_cond_unet
a_ : int = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
a_ : Tuple = self.dummy_vae
a_ : Optional[Any] = self.dummy_text_encoder
a_ : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
a_ : Union[str, Any] = unet.half()
a_ : Optional[Any] = vae.half()
a_ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
a_ : Tuple = StableDiffusionPipeline(
unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
a_ : Optional[int] = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
a_ : int = "A painting of a squirrel eating a burger"
a_ : Tuple = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
a_ : List[Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
a_ : List[str] = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
a_ : str = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
a_ : Optional[int] = 4_0_0_3_6_6_0_3_4_6
a_ : Optional[int] = 7
# without safety guidance (sld_guidance_scale = 0)
a_ : Tuple = torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
a_ : Any = output.images
a_ : Any = image[0, -3:, -3:, -1]
a_ : List[str] = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
a_ : List[str] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : List[str] = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a_ : List[str] = output.images
a_ : Union[str, Any] = image[0, -3:, -3:, -1]
a_ : List[Any] = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_nudity_safe_stable_diffusion(self):
a_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
a_ : Dict = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
a_ : Tuple = "padme amidala taking a bath artwork, safe for work, no nudity"
a_ : List[Any] = 2_7_3_4_9_7_1_7_5_5
a_ : Tuple = 7
a_ : Dict = torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : Dict = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
a_ : str = output.images
a_ : Optional[Any] = image[0, -3:, -3:, -1]
a_ : Optional[int] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
a_ : Any = torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : str = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a_ : str = output.images
a_ : Optional[Any] = image[0, -3:, -3:, -1]
a_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
a_ : Optional[Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
a_ : Dict = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
a_ : Tuple = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
a_ : List[str] = 1_0_4_4_3_5_5_2_3_4
a_ : Dict = 1_2
a_ : List[Any] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : Tuple = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
a_ : Any = output.images
a_ : List[str] = image[0, -3:, -3:, -1]
a_ : Tuple = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
a_ : Optional[Any] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : Optional[int] = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
a_ : int = output.images
a_ : Union[str, Any] = image[0, -3:, -3:, -1]
a_ : Tuple = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
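
# Illustrative sketch (added; not part of the original tests): the safe latent
# diffusion knobs the nightly tests above sweep — sld_guidance_scale=0 turns
# the safety guidance off, larger values steer sampling away from unsafe
# concepts. Requires network access and a CUDA GPU, hence the guard; the
# checkpoint name is taken from the tests above.
if __name__ == "__main__" and torch.cuda.is_available():
    from diffusers import StableDiffusionPipelineSafe

    safe_pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")
    safe_image = safe_pipe(
        "a photo of an astronaut riding a horse", num_inference_steps=25, sld_guidance_scale=2000
    ).images[0]
    safe_image.save("safe_sample.png")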
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
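
# Illustrative sketch (added; not part of the original file): a minimal
# stand-alone analogue of the _LazyModule pattern used above — attribute
# access triggers the real import, which keeps `import transformers` cheap.
import importlib


class _LazyAttrProxy:
    """Defer importing `module_name` until one of its attributes is accessed."""

    def __init__(self, module_name):
        self._module_name = module_name

    def __getattr__(self, attr):
        return getattr(importlib.import_module(self._module_name), attr)


# e.g. _LazyAttrProxy("json").dumps({"a": 1}) imports json only on first use.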
def xnor_gate(input_1: int, input_2: int) -> int:
    """Return 1 if both inputs are equal (XNOR), otherwise 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
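
# Illustrative sketch (added; not part of the original file): XNOR is the
# negation of XOR, so the gate above must satisfy xnor_gate(a, b) == 1 - (a ^ b)
# over the whole truth table.
from itertools import product

for _a, _b in product((0, 1), repeat=2):
    assert xnor_gate(_a, _b) == 1 - (_a ^ _b)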
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get a different unet than `dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
def SCREAMING_SNAKE_CASE ( self : str) ->int:
'''simple docstring'''
A__ = '''cpu'''
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**UpperCAmelCase__)
A__ = pipe.to(UpperCAmelCase__)
pipe.set_progress_bar_config(disable=UpperCAmelCase__)
A__ = self.get_dummy_inputs(UpperCAmelCase__ , pil_image=UpperCAmelCase__)
A__ = pipe(**UpperCAmelCase__)
A__ = output.images
A__ = self.get_dummy_inputs(UpperCAmelCase__ , pil_image=UpperCAmelCase__)
A__ = pipe(
**UpperCAmelCase__ , return_dict=UpperCAmelCase__ , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A__ = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
'''simple docstring'''
A__ = '''cpu'''
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**UpperCAmelCase__)
A__ = pipe.to(UpperCAmelCase__)
pipe.set_progress_bar_config(disable=UpperCAmelCase__)
A__ = self.get_dummy_inputs(UpperCAmelCase__ , pil_image=UpperCAmelCase__)
A__ = pipe(**UpperCAmelCase__)
A__ = output.images
A__ = self.get_dummy_inputs(UpperCAmelCase__ , pil_image=UpperCAmelCase__)
A__ = pipe(
**UpperCAmelCase__ , return_dict=UpperCAmelCase__ , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A__ = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
A__ = '''cpu'''
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**UpperCAmelCase__)
A__ = pipe.to(UpperCAmelCase__)
pipe.set_progress_bar_config(disable=UpperCAmelCase__)
A__ = self.get_dummy_inputs(UpperCAmelCase__ , pil_image=UpperCAmelCase__)
A__ = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
A__ = pipe(**UpperCAmelCase__)
A__ = output.images
A__ = self.get_dummy_inputs(UpperCAmelCase__ , pil_image=UpperCAmelCase__)
A__ = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
A__ = pipe(
**UpperCAmelCase__ , return_dict=UpperCAmelCase__ , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
A__ = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
'''simple docstring'''
A__ = torch.device('''cpu''')
class UpperCamelCase_ :
'''simple docstring'''
UpperCAmelCase__ = 1
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**UpperCAmelCase__)
A__ = pipe.to(UpperCAmelCase__)
pipe.set_progress_bar_config(disable=UpperCAmelCase__)
A__ = torch.Generator(device=UpperCAmelCase__).manual_seed(0)
A__ = pipe.decoder.dtype
A__ = 1
A__ = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
A__ = pipe.prepare_latents(
UpperCAmelCase__ , dtype=UpperCAmelCase__ , device=UpperCAmelCase__ , generator=UpperCAmelCase__ , latents=UpperCAmelCase__ , scheduler=DummyScheduler())
A__ = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
A__ = pipe.prepare_latents(
UpperCAmelCase__ , dtype=UpperCAmelCase__ , device=UpperCAmelCase__ , generator=UpperCAmelCase__ , latents=UpperCAmelCase__ , scheduler=DummyScheduler())
A__ = self.get_dummy_inputs(UpperCAmelCase__ , pil_image=UpperCAmelCase__)
A__ = pipe(
**UpperCAmelCase__ , decoder_latents=UpperCAmelCase__ , super_res_latents=UpperCAmelCase__).images
A__ = self.get_dummy_inputs(UpperCAmelCase__ , pil_image=UpperCAmelCase__)
# Don't pass image, instead pass embedding
A__ = pipeline_inputs.pop('''image''')
A__ = pipe.image_encoder(UpperCAmelCase__).image_embeds
A__ = pipe(
**UpperCAmelCase__ , decoder_latents=UpperCAmelCase__ , super_res_latents=UpperCAmelCase__ , image_embeddings=UpperCAmelCase__ , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a).max() < 1e-4
@skip_mps
def SCREAMING_SNAKE_CASE ( self : Tuple) ->int:
'''simple docstring'''
A__ = torch_device == '''cpu'''
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
A__ = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCAmelCase__ , expected_max_diff=UpperCAmelCase__)
@skip_mps
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple:
'''simple docstring'''
A__ = torch_device == '''cpu'''
A__ = True
A__ = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=UpperCAmelCase__ , relax_max_difference=UpperCAmelCase__ , additional_params_copy_to_batched_inputs=UpperCAmelCase__ , )
def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]:
'''simple docstring'''
A__ = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
A__ = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=UpperCAmelCase__ , additional_params_copy_to_batched_inputs=UpperCAmelCase__ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=UpperCAmelCase__)
@skip_mps
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int:
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def SCREAMING_SNAKE_CASE ( self : int) ->int:
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict:
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_unclip_image_variation_karlo(self):
A__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''')
A__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''')
A__ = UnCLIPImageVariationPipeline.from_pretrained(
'''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.floataa)
A__ = pipeline.to(UpperCAmelCase__)
pipeline.set_progress_bar_config(disable=UpperCAmelCase__)
A__ = torch.Generator(device='''cpu''').manual_seed(0)
A__ = pipeline(
UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type='''np''' , )
A__ = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(UpperCAmelCase__ , UpperCAmelCase__ , 15)
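
# Illustrative sketch (added; not part of the original tests): the slow test
# above as plain usage. The checkpoint name and cat image URL are taken
# verbatim from the test; requires network access and a CUDA GPU, hence the
# guard.
if __name__ == "__main__" and torch.cuda.is_available():
    variation_pipe = UnCLIPImageVariationPipeline.from_pretrained(
        "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
    ).to("cuda")
    source = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
    )
    variation = variation_pipe(source, generator=torch.Generator(device="cpu").manual_seed(0)).images[0]
    variation.save("cat_variation.png")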
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    """Extract the pet breed from a filename such as `Abyssinian_12.jpg`."""
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
if args.with_tracking:
_UpperCamelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
_UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCamelCase = config["lr"]
_UpperCamelCase = int(config["num_epochs"] )
_UpperCamelCase = int(config["seed"] )
_UpperCamelCase = int(config["batch_size"] )
_UpperCamelCase = config["image_size"]
if not isinstance(a__ , (list, tuple) ):
_UpperCamelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , "isdigit" ):
if args.checkpointing_steps == "epoch":
_UpperCamelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
_UpperCamelCase = int(args.checkpointing_steps )
else:
raise ValueError(
f'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.' )
else:
_UpperCamelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
_UpperCamelCase = os.path.split(a__ )[-1].split("." )[0]
accelerator.init_trackers(a__ , a__ )
# Grab all the image filenames
_UpperCamelCase = [os.path.join(args.data_dir , a__ ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
_UpperCamelCase = [extract_label(a__ ) for fname in file_names]
_UpperCamelCase = list(set(a__ ) )
id_to_label.sort()
_UpperCamelCase = {lbl: i for i, lbl in enumerate(a__ )}
# Set the seed before splitting the data.
np.random.seed(a__ )
torch.manual_seed(a__ )
torch.cuda.manual_seed_all(a__ )
# Split our filenames between train and validation
_UpperCamelCase = np.random.permutation(len(a__ ) )
_UpperCamelCase = int(0.8 * len(a__ ) )
_UpperCamelCase = random_perm[:cut]
_UpperCamelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
_UpperCamelCase = Compose([RandomResizedCrop(a__ , scale=(0.5, 1.0) ), ToTensor()] )
_UpperCamelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=a__ , label_to_id=a__ )
# For evaluation, we use a deterministic Resize
_UpperCamelCase = Compose([Resize(a__ ), ToTensor()] )
_UpperCamelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=a__ , label_to_id=a__ )
# Instantiate dataloaders.
_UpperCamelCase = DataLoader(a__ , shuffle=a__ , batch_size=a__ , num_workers=4 )
_UpperCamelCase = DataLoader(a__ , shuffle=a__ , batch_size=a__ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCamelCase = create_model("resnet50d" , pretrained=a__ , num_classes=len(a__ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCamelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
_UpperCamelCase = False
for param in model.get_classifier().parameters():
_UpperCamelCase = True
# We normalize the batches of images to be a bit faster.
_UpperCamelCase = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
_UpperCamelCase = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
_UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
_UpperCamelCase = OneCycleLR(optimizer=a__ , max_lr=a__ , epochs=a__ , steps_per_epoch=len(a__ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = accelerator.prepare(
a__ , a__ , a__ , a__ , a__ )
# We need to keep track of how many total steps we have iterated over
_UpperCamelCase = 0
# We also need to keep track of the starting epoch so files are named properly
_UpperCamelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f'Resumed from checkpoint: {args.resume_from_checkpoint}' )
accelerator.load_state(args.resume_from_checkpoint )
_UpperCamelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
_UpperCamelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
_UpperCamelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
_UpperCamelCase = os.path.splitext(a__ )[0]
if "epoch" in training_difference:
_UpperCamelCase = int(training_difference.replace("epoch_" , "" ) ) + 1
_UpperCamelCase = None
else:
_UpperCamelCase = int(training_difference.replace("step_" , "" ) )
_UpperCamelCase = resume_step // len(a__ )
resume_step -= starting_epoch * len(a__ )
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
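# A minimal sketch of the save/load round trip that the resume logic above relies on,
# outside the pets example. The model, optimizer and checkpoint path here are
# illustrative stand-ins, not part of the original script.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(8, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)
accelerator.save_state("checkpoints/step_100")  # saves weights, optimizer and RNG states
accelerator.load_state("checkpoints/step_100")  # restores them all in place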
| 547
| 0
|
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '''\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
'''
_DESCRIPTION = '''\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''
_KWARGS_DESCRIPTION = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"precision": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'precision@10\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(in_sentvecs, en_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(snake_case_ , snake_case_ )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
| 719
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")
    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()
    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
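# The fused-qkv handling in convert_state_dict is the one non-mechanical step: timm stores
# query/key/value as a single (3 * dim, dim) matrix, while the HF checkpoint wants three
# (dim, dim) slices. A standalone sketch of that split (the size is illustrative):
import torch

dim = 96  # all_head_size of one block, illustrative
qkv_weight = torch.randn(3 * dim, dim)  # timm layout: q, k, v stacked along dim 0
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)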
| 119
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_neox_fast'] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox'] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
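# Nothing heavy is imported when such an __init__.py loads: _LazyModule only records the
# structure above, and the actual submodule import happens on first attribute access.
# A sketch of the deferred behaviour (assuming an environment with torch installed):
import transformers.models.gpt_neox as gpt_neox  # cheap: no modeling code runs yet

# First attribute access triggers the real `from .modeling_gpt_neox import ...` behind
# the scenes, so the import cost is only paid if the class is actually needed.
model_cls = gpt_neox.GPTNeoXModel
print(model_cls.__name__)  # GPTNeoXModel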
| 418
|
def solution(n: int = 4_000_000) -> int:
    even_fibs: list[int] = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(f'''{solution() = }''')
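# Since the Fibonacci parity pattern is odd, odd, even, every third term is even, so the
# same sum can be accumulated without the modulo test by jumping three Fibonacci steps at
# a time. A sketch of that equivalent variant:
def solution_skip(n: int = 4_000_000) -> int:
    total = 0
    a, b = 1, 2  # 2 is the first even Fibonacci number
    while b <= n:
        total += b
        a, b = a + 2 * b, 2 * a + 3 * b  # (F(k-1), F(k)) -> (F(k+2), F(k+3))
    return total


assert solution_skip() == solution()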
| 504
| 0
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
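# TrainingJobAnalytics can only surface eval_accuracy / eval_loss rows because the
# estimator was created with matching metric_definitions regexes that SageMaker applies
# to the training logs. The exact patterns live in self.env.metric_definitions, which is
# not shown in this file, so the regexes below are only illustrative of the shape:
metric_definitions = [
    {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
    {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
    {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]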
| 708
|
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(F"""{solution() = }""")
| 632
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)
        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
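# Outside the test harness, the integration test above boils down to the standard
# classification inference pattern. A minimal sketch using the same checkpoint and the
# library's public class name (MobileNetV2ForImageClassification in released transformers):
import torch
from PIL import Image
from transformers import AutoImageProcessor, MobileNetV2ForImageClassification

processor = AutoImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])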
| 673
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
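# In use, align_with_features is what swaps the bare ClassLabel placeholder in the label
# schema for the dataset's concrete ClassLabel. A small sketch, assuming the class above
# is exported as datasets.tasks.ImageClassification (label names invented):
from datasets import ClassLabel, Features, Image
from datasets.tasks import ImageClassification

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
template = ImageClassification(image_column="image", label_column="labels")
aligned = template.align_with_features(features)
print(aligned.label_schema["labels"].names)  # ['cat', 'dog']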
| 82
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 158
|
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(
        self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )
        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
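# Seen from the outside, the first/second-order bookkeeping above reduces to the standard
# diffusers scheduler loop. A hedged sketch with a zero tensor standing in for a real
# UNet's noise prediction (shapes are illustrative); each timestep appears twice in
# scheduler.timesteps, and the scheduler alternates between the two orders itself:
import torch

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=25)
sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for unet(model_input, t).sample
    sample = scheduler.step(noise_pred, t, sample).prev_sample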
| 158
| 1
|
"""simple docstring"""
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # Randomly shuffle all lists, using the same permutation for lists of equal length
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
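# A concrete check of the distribution rule: each of the first num_shards % max_num_jobs
# jobs receives one extra shard, and _split_gen_kwargs only splits list-valued kwargs.
print(_distribute_shards(num_shards=10, max_num_jobs=3))
# [range(0, 4), range(4, 7), range(7, 10)]
print(_split_gen_kwargs({"files": ["a", "b", "c", "d"], "mode": "train"}, max_num_jobs=2))
# [{'files': ['a', 'b'], 'mode': 'train'}, {'files': ['c', 'd'], 'mode': 'train'}]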
| 545
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
snake_case : Tuple = None
snake_case : Tuple = logging.get_logger(__name__)
snake_case : List[Any] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
snake_case : Dict = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
},
"""tokenizer_file""": {
"""google/bigbird-roberta-base""": (
"""https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
),
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
),
},
}
snake_case : int = {
"""google/bigbird-roberta-base""": 4_0_9_6,
"""google/bigbird-roberta-large""": 4_0_9_6,
"""google/bigbird-base-trivia-itc""": 4_0_9_6,
}
snake_case : List[str] = """▁"""
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
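# The three helpers above implement the single-sequence [CLS] A [SEP] and pair
# [CLS] A [SEP] B [SEP] conventions. A toy check of the list arithmetic with made-up ids
# (65/66 stand in for whatever the real vocab assigns to [CLS]/[SEP]):
cls_id, sep_id = 65, 66
a, b = [10, 11], [20]
single = [cls_id] + a + [sep_id]
pair = [cls_id] + a + [sep_id] + b + [sep_id]
token_type_ids = len([cls_id] + a + [sep_id]) * [0] + len(b + [sep_id]) * [1]
print(single)          # [65, 10, 11, 66]
print(pair)            # [65, 10, 11, 66, 20, 66]
print(token_type_ids)  # [0, 0, 0, 0, 1, 1]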
| 545
| 1
|
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
lowerCAmelCase_ : Dict = numpy.array([0, 0])
lowerCAmelCase_ : int = numpy.array([0.5, 0.8_660_254])
lowerCAmelCase_ : Union[str, Any] = numpy.array([1, 0])
lowerCAmelCase_ : Tuple = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCAmelCase ( A : list[numpy.ndarray] , A : int ):
SCREAMING_SNAKE_CASE : int = initial_vectors
for _ in range(A ):
SCREAMING_SNAKE_CASE : Dict = iteration_step(A )
return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect('equal')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
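    # Growth check (an illustration, not in the original script): every step
    # replaces each edge with four, so after n steps the point list holds
    # 3 * 4**n + 1 entries, e.g. len(iterate(INITIAL_VECTORS, 2)) == 49.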
| 464
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tapas'] = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_tapas'] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
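# Note on the lazy-import pattern above (explanatory sketch, not part of the
# original file): installing _LazyModule into sys.modules defers the heavy
# torch / TensorFlow imports until an attribute is first accessed, e.g.
#   from transformers import TapasConfig   # cheap: only the config module loads
#   from transformers import TapasModel    # pulls in torch on first access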
| 464
| 1
|
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main():
    '''simple docstring'''
    print('Making key files...')
    make_key_files('rsa', 1024)
    print('Key files generation successful.')
def generate_key(key_size: int):
    '''simple docstring'''
    print('Generating prime p...')
    p = rabinMiller.generate_large_prime(key_size)
    print('Generating prime q...')
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q
    print('Generating e that is relatively prime to (p - 1) * (q - 1)...')
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break
    print('Calculating d that is mod inverse of e...')
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))
    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files(name: str, key_size: int):
    '''simple docstring'''
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print('\nWARNING:')
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            'Use a different name or delete these files and re-run this program.')
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", 'w') as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", 'w') as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
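# Minimal round-trip sketch (an illustration, not part of the original script):
# textbook RSA on an integer message m < n. Encryption is pow(m, e, n) and
# decryption pow(c, d, n), because e * d == 1 (mod (p - 1) * (q - 1)).
def demo_round_trip(key_size: int = 1024) -> None:
    (n, e), (_, d) = generate_key(key_size)
    message = 42
    ciphertext = pow(message, e, n)
    assert pow(ciphertext, d, n) == message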
if __name__ == "__main__":
main()
| 70
|
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'UNwant\u00E9d,running'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = 'UNwant\u00E9d,running'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        """simple docstring"""
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz'), ['ah', '\u535A', '\u63A8', 'zz'])

    def test_basic_tokenizer_lower(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['hello', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hällo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['h\u00E9llo'])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_no_lower(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=['[UNK]'])
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]'), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])

    def test_basic_tokenizer_splits_on_punctuation(self):
        """simple docstring"""
        tokenizer = BasicTokenizer()
        text = 'a\n\'ll !!to?\'d of, can\'t.'
        expected = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        """simple docstring"""
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')
        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])
    def test_is_whitespace(self):
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control(self):
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation(self):
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
@slow
    def test_sequence_builders(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('bert-base-uncased')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters(self):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, 'do_lower_case') else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
    def test_change_tokenize_chinese_chars(self):
        """simple docstring"""
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 70
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ), )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ), )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ), )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ), )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ), )
| 711
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
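# Illustrative usage (a sketch, not part of the original file): the property
# above reports -1 because Transformer-XL has no hard sequence-length limit.
#   config = TransfoXLConfig(n_layer=6, d_model=512, d_embed=512)
#   assert config.max_position_embeddings == -1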
| 693
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''deit'''

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
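# Illustrative check (a sketch, not part of the original file): the ONNX export
# config above declares one dynamically-shaped input and a validation tolerance.
#   onnx_config = DeiTOnnxConfig(DeiTConfig())
#   assert "pixel_values" in onnx_config.inputs
#   assert onnx_config.atol_for_validation == 1e-4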
| 617
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    '''simple docstring'''

    task: str = field(default='''summarization''', metadata={'''include_in_asdict_even_if_is_default''': True})
    input_schema: ClassVar[Features] = Features({'''text''': Value('''string''')})
    label_schema: ClassVar[Features] = Features({'''summary''': Value('''string''')})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
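# Illustrative usage (a sketch, not part of the original file): remap dataset
# columns onto the canonical text/summary schema declared above.
#   task = Summarization(text_column="article", summary_column="highlights")
#   assert task.column_mapping == {"article": "text", "highlights": "summary"}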
| 617
| 1
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        """simple docstring"""
        # is_decoder / use_stable_embedding values are assumed; the originals were obfuscated.
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        """simple docstring"""
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        """simple docstring"""
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        """simple docstring"""
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''feature-extraction''': OpenLlamaModel,
            '''text-classification''': OpenLlamaForSequenceClassification,
            '''text-generation''': OpenLlamaForCausalLM,
            '''zero-shot''': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """single_label_classification"""
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """multi_label_classification"""
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
    @parameterized.expand([("""linear""",), ("""dynamic""",)])
    def test_model_rope_scaling(self, scaling_type):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"""type""": scaling_type, """factor""": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
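# Running these tests (example invocation; the test-file path is an assumption
# about the repository layout, not pinned by this file):
#   pytest tests/models/open_llama/test_modeling_open_llama.py -k "rope_scaling"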
| 705
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = encoder_stride
__lowercase = num_attention_outputs
__lowercase = embed_dim
__lowercase = embed_dim + 1
__lowercase = resolution
__lowercase = depths
__lowercase = hidden_sizes
__lowercase = dim
__lowercase = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
        # is_decoder=False is assumed; the original value was obfuscated.
        return EfficientFormerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio, )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37)
def _a ( self : Optional[int] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def _a ( self : int ) -> str:
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ):
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__lowercase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__lowercase = seq_length * self.model_tester.chunk_length
else:
__lowercase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__lowercase = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states, (list, tuple))
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=False ) -> Dict:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """key_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__lowercase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__lowercase = model_class(_lowerCAmelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__lowercase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__lowercase = model(_lowerCAmelCase )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_tf
@require_vision
class TFEfficientFormerModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 53
| 0
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
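# Illustrative usage of the pipeline exported above (a sketch; the checkpoint id
# is an example, not pinned by this file):
#   from diffusers import UnCLIPPipeline
#   pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
#   image = pipe("a photo of an astronaut").images[0]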
| 18
|
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10_000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10_000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        n_index = solution.index(n)
        for kn in solution[1:-1]:
            kn_index = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[n_index] = kn
            _tmp[kn_index] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def lowerCamelCase ( a_=None ) -> Optional[int]:
lowerCAmelCase_ = generate_neighbours(args.File )
lowerCAmelCase_ , lowerCAmelCase_ = generate_first_solution(
args.File , a_ )
lowerCAmelCase_ , lowerCAmelCase_ = tabu_search(
a_ , a_ , a_ , args.Iterations , args.Size , )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
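# Illustrative usage sketch (assumed, not from the original script): driving the
# functions above from Python instead of the CLI. The file name "tsp_data.txt"
# and its "node_a node_b distance" line format are assumptions inferred from how
# generate_neighbours() parses its input.
def _demo_run(path: str = "tsp_data.txt", iters: int = 100, size: int = 5):
    neighbours = generate_neighbours(path)
    first_solution, first_distance = generate_first_solution(path, neighbours)
    return tabu_search(first_solution, first_distance, neighbours, iters, size)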
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 318
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
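# Illustrative usage sketch (assumed, not from the original module); the
# `models` field is assumed to come from the BenchmarkArguments base class:
#
#   args = TensorFlowBenchmarkArguments(models=["bert-base-cased"], eager_mode=True)
#   strategy = args.strategy  # OneDeviceStrategy on CPU/GPU, TPUStrategy on TPU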
| 711
|
'''simple docstring'''
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Side length of the largest all-ones square, via plain recursion.

    Function names other than largest_square_area_in_matrix_bottom_up (which is
    confirmed by the caller below) are reconstructed.
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, memoised in dp_array."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP keeping only the current row and the row below it."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # snapshot the row just computed (a plain assignment would alias the lists)
        next_row = current_row.copy()
    return largest_square_area
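

# Illustrative cross-check (assumed, not from the original file): all four
# variants above should return the same side length (2 for this matrix).
def _demo_cross_check() -> None:
    mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    rows, cols = len(mat), len(mat[0])
    results = {
        largest_square_area_in_matrix_top_down(rows, cols, mat),
        largest_square_area_in_matrix_top_down_with_dp(rows, cols, mat),
        largest_square_area_in_matrix_bottom_up(rows, cols, mat),
        largest_square_area_in_matrix_bottom_up_space_optimization(rows, cols, mat),
    }
    assert results == {2}, results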
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 653
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None, range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
snake_case__ = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
snake_case__ = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
snake_case__ = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
snake_case__ = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
snake_case__ = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class __snake_case ( unittest.TestCase ):
@slow
def _snake_case ( self ) -> Optional[Any]:
snake_case__ = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = prepare_layoutlm_batch_inputs()
# forward pass
snake_case__ = model(input_ids=UpperCamelCase_ , bbox=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
# test the sequence output on [0, :3, :3]
snake_case__ = tf.convert_to_tensor(
[[0.1_7_8_5, -0.1_9_4_7, -0.0_4_2_5], [-0.3_2_5_4, -0.2_8_0_7, 0.2_5_5_3], [-0.5_3_9_1, -0.3_3_2_2, 0.3_3_6_4]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=1E-3 ) )
# test the pooled output on [1, :3]
snake_case__ = tf.convert_to_tensor([-0.6_5_8_0, -0.0_2_1_4, 0.8_5_5_2] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , UpperCamelCase_ , atol=1E-3 ) )
@slow
def _snake_case ( self ) -> Optional[Any]:
# initialize model with randomly initialized sequence classification head
snake_case__ = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = prepare_layoutlm_batch_inputs()
# forward pass
snake_case__ = model(
input_ids=UpperCamelCase_ , bbox=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
snake_case__ = outputs.loss
snake_case__ = (2,)
self.assertEqual(loss.shape , UpperCamelCase_ )
# test the shape of the logits
snake_case__ = outputs.logits
snake_case__ = (2, 2)
self.assertEqual(logits.shape , UpperCamelCase_ )
@slow
def _snake_case ( self ) -> Optional[int]:
# initialize model with randomly initialized token classification head
snake_case__ = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 )
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = prepare_layoutlm_batch_inputs()
# forward pass
snake_case__ = model(
input_ids=UpperCamelCase_ , bbox=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
# test the shape of the logits
snake_case__ = outputs.logits
snake_case__ = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , UpperCamelCase_ )
@slow
def _snake_case ( self ) -> Dict:
# initialize model with randomly initialized token classification head
snake_case__ = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = prepare_layoutlm_batch_inputs()
# forward pass
snake_case__ = model(input_ids=UpperCamelCase_ , bbox=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
# test the shape of the logits
snake_case__ = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , UpperCamelCase_ )
self.assertEqual(outputs.end_logits.shape , UpperCamelCase_ )
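# Added note (assumed, not from the original test file): these tests are
# normally run with pytest, e.g.
#   pytest tests/models/layoutlm/test_modeling_tf_layoutlm.py -k "test_model"
# The exact file path is an assumption based on the usual transformers layout.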
| 368
|
'''simple docstring'''
import os
def solution(filename: str = "input.txt") -> int:
    """Minimal path sum from the left column to the right column, moving up,
    down, and right (Project Euler problem 82)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 368
| 1
|
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
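# Illustrative usage sketch (assumed, not from the original module); the
# archive name is hypothetical:
#
#   fs = GzipFileSystem(fo="file.txt.gz")
#   data = fs.cat("file.txt")  # bytes of the decompressed file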
| 721
|
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # mirrors the Win32 CONSOLE_CURSOR_INFO struct
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide_cursor_context():
    # the name of this context manager is reconstructed; hide_cursor and
    # show_cursor are the names used by its body
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
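# Illustrative usage sketch (assumed, not from the original module);
# render_menu is a hypothetical rendering function:
#
#   with hide_cursor_context():
#       render_menu()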
| 632
| 0
|
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowerCamelCase__ : str = "scheduler_config.json"
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = 3
UpperCamelCase = 4
UpperCamelCase = 5
UpperCamelCase = 6
UpperCamelCase = 7
UpperCamelCase = 8
UpperCamelCase = 9
UpperCamelCase = 10
UpperCamelCase = 11
UpperCamelCase = 12
UpperCamelCase = 13
UpperCamelCase = 14
@dataclass
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 42
class lowercase__:
'''simple docstring'''
UpperCamelCase = SCHEDULER_CONFIG_NAME
UpperCamelCase = []
UpperCamelCase = True
@classmethod
def __lowerCAmelCase ( cls :Dict , lowerCamelCase_ :Dict[str, Any] = None , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Dict=False , **lowerCamelCase_ :Tuple , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = cls.load_config(
pretrained_model_name_or_path=lowerCamelCase_ , subfolder=lowerCamelCase_ , return_unused_kwargs=lowerCamelCase_ , return_commit_hash=lowerCamelCase_ , **lowerCamelCase_ , )
return cls.from_config(lowerCamelCase_ , return_unused_kwargs=lowerCamelCase_ , **lowerCamelCase_ )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :Union[str, os.PathLike] , lowerCamelCase_ :bool = False , **lowerCamelCase_ :Any ) -> Optional[int]:
'''simple docstring'''
self.save_config(save_directory=lowerCamelCase_ , push_to_hub=lowerCamelCase_ , **lowerCamelCase_ )
@property
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def __lowerCAmelCase ( cls :Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = list(set([cls.__name__] + cls._compatibles ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = importlib.import_module(__name__.split('''.''' )[0] )
SCREAMING_SNAKE_CASE : List[Any] = [
getattr(lowerCamelCase_ , lowerCamelCase_ ) for c in compatible_classes_str if hasattr(lowerCamelCase_ , lowerCamelCase_ )
]
return compatible_classes
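# Added note (not from the original module): the classmethod above resolves the
# scheduler class names listed in _compatibles into actual classes by looking
# them up on the top-level diffusers module, so each scheduler can advertise
# which other schedulers share its configuration.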
| 698
|
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
lowerCamelCase__ : List[Any] = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
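# Added note (not from the original script): both qubits are deterministically
# flipped to |1> by the X gates, so the expected histogram is {'11': 1000}.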
| 698
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
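# Illustrative migration sketch (assumed, not from the original module); the
# checkpoint name is hypothetical:
#
#   from transformers import ImageGPTImageProcessor
#   processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")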
| 713
|
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
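# Added note (not from the original test file): the assertions above encode
# Funnel's segment-id convention, where token_type_id 2 marks the leading
# <cls> token, 0 the first sequence, and 1 the second sequence of a pair.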
| 190
| 0
|
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
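# Illustrative CLI sketch via python-fire (assumed, not from the original
# script; the script and directory names are hypothetical):
#
#   python minify_dataset.py full_data/ mini_data/ 128
#
# writes the first 128 (rstripped) lines of every file in full_data/ into
# files of the same name under mini_data/.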
| 397
|
"""simple docstring"""
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a * x + b * y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Solve n = r1 (mod n1), n = r2 (mod n2) for coprime n1, n2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, built on invert_modulo."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 642
| 0
|
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
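# Illustrative CLI sketch via python-fire (assumed, not from the original
# script; the tokenizer name and data_dir are hypothetical):
#
#   python save_len_file.py facebook/bart-large ./wmt_en_ro
#
# which pickles per-example token lengths next to the train/val datasets so a
# dynamic batch sampler can group similarly sized examples.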
| 616
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCamelCase :
def __init__( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int=2 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : Dict=1_0 , __UpperCAmelCase : str=3 , __UpperCAmelCase : List[str]=3_2 * 8 , __UpperCAmelCase : List[Any]=3_2 * 8 , __UpperCAmelCase : Tuple=4 , __UpperCAmelCase : Tuple=6_4 , ) -> Tuple:
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_auxiliary_loss
SCREAMING_SNAKE_CASE__ = num_queries
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = min_size
SCREAMING_SNAKE_CASE__ = max_size
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = hidden_dim
SCREAMING_SNAKE_CASE__ = hidden_dim
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__UpperCAmelCase ) > 0.5
).float()
SCREAMING_SNAKE_CASE__ = (torch.rand((self.batch_size, self.num_labels) , device=__UpperCAmelCase ) > 0.5).long()
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
SCREAMING_SNAKE_CASE__ = self.num_queries
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = [1, 1, 1, 1]
SCREAMING_SNAKE_CASE__ = self.num_channels
SCREAMING_SNAKE_CASE__ = 6_4
SCREAMING_SNAKE_CASE__ = 1_2_8
SCREAMING_SNAKE_CASE__ = self.hidden_dim
SCREAMING_SNAKE_CASE__ = self.hidden_dim
SCREAMING_SNAKE_CASE__ = self.hidden_dim
return config
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE__ = output.encoder_hidden_states
SCREAMING_SNAKE_CASE__ = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE__ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__UpperCAmelCase ) , config.decoder_layers )
def SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any]=False ) -> Tuple:
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = MaskaFormerModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__UpperCAmelCase , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ = MaskaFormerForUniversalSegmentation(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
def comm_check_on_output(__UpperCAmelCase : Optional[int] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
comm_check_on_output(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(
pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase )
comm_check_on_output(__UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCamelCase (A__ ,A__ ,unittest.TestCase ):
lowerCamelCase__ : Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCamelCase__ : Any = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : str = False
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE__ = MaskaFormerModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__UpperCAmelCase , **__UpperCAmelCase , output_hidden_states=__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__UpperCAmelCase )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
SCREAMING_SNAKE_CASE__ = MaskaFormerModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE__ = {
"""pixel_values""": torch.randn((2, 3, *size) , device=__UpperCAmelCase ),
"""mask_labels""": torch.randn((2, 1_0, *size) , device=__UpperCAmelCase ),
"""class_labels""": torch.zeros(2 , 1_0 , device=__UpperCAmelCase ).long(),
}
SCREAMING_SNAKE_CASE__ = self.model_tester.get_config()
SCREAMING_SNAKE_CASE__ = MaskaFormerForUniversalSegmentation(__UpperCAmelCase ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__UpperCAmelCase , **__UpperCAmelCase , output_hidden_states=__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase , output_attentions=__UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ = self.all_model_classes[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.all_model_classes[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase ).to(__UpperCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1E-4
def A ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class lowerCamelCase (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE__ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__UpperCAmelCase , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(__UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(__UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(__UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__UpperCAmelCase ).eval()
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__UpperCAmelCase , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase )
# masks_queries_logits
SCREAMING_SNAKE_CASE__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
SCREAMING_SNAKE_CASE__ = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
SCREAMING_SNAKE_CASE__ = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__UpperCAmelCase ).eval()
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="""pt""" , )
SCREAMING_SNAKE_CASE__ = inputs["""pixel_values"""].to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = [el.to(__UpperCAmelCase ) for el in inputs["""mask_labels"""]]
SCREAMING_SNAKE_CASE__ = [el.to(__UpperCAmelCase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
| 616
| 1
|
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=None , _a=None ) -> List[str]:
_a : List[Any] = data
_a : List[str] = previous
_a : Union[str, Any] = next_node
def __str__( self ) -> str:
return F"""{self.data}"""
def __lowercase ( self ) -> int:
return self.data
def __lowercase ( self ) -> Union[str, Any]:
return self.next
def __lowercase ( self ) -> str:
return self.previous
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a ) -> str:
_a : int = head
def __iter__( self ) -> List[Any]:
return self
def __lowercase ( self ) -> Optional[int]:
if not self.current:
raise StopIteration
else:
_a : Optional[int] = self.current.get_data()
_a : Dict = self.current.get_next()
return value
class LinkedList:
    """simple docstring"""
    def __init__( self ) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list
    def __str__( self ) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data() )
            current = current.get_next()
        return " ".join(str(node ) for node in nodes )
    def __contains__( self , value ) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False
    def __iter__( self ):
        return LinkedListIterator(self.head )
    def get_head_data( self ):
        if self.head:
            return self.head.get_data()
        return None
    def get_tail_data( self ):
        if self.tail:
            return self.tail.get_data()
        return None
    def set_head( self , node ) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head , node )
    def set_tail( self , node ) -> None:
        if self.head is None:
            self.set_head(node )
        else:
            self.insert_after_node(self.tail , node )
    def insert( self , value ) -> None:
        node = Node(value )
        if self.head is None:
            self.set_head(node )
        else:
            self.set_tail(node )
    def insert_before_node( self , node , node_to_insert ) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert
    def insert_after_node( self , node , node_to_insert ) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert
    def insert_at_position( self , position , value ) -> None:
        current_position = 1
        new_node = Node(value )
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node , new_node )
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail , new_node )
    def get_node( self , item ) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception('''Node not found''' )
    def delete_value( self , value ) -> None:
        if (node := self.get_node(value )) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node )
    @staticmethod
    def remove_node_pointers( node ) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None
    def is_empty( self ) -> bool:
        return self.head is None
def create_linked_list() -> None:
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
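A minimal usage sketch for the list above (names follow the restored class; the values are illustrative):

# Quick demo of the doubly linked list above.
linked_list = LinkedList()
for value in (1, 2, 3):
    linked_list.insert(value)          # appends at the tail
linked_list.insert_at_position(2, 99)  # list is now: 1 99 2 3
linked_list.delete_value(99)           # back to: 1 2 3
print(linked_list)                     # -> "1 2 3"
print(2 in linked_list)                # -> True, via __contains__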
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_unet( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = UNetaDModel(
            sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
        return model
@property
    def dummy_unet_condition( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , )
        return model
@property
    def dummy_vqvae_and_unet( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        vqvae = AutoencoderKL(
            sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
        unet = UNetaDModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
        return vqvae, unet
@slow
    def test_audio_diffusion( self ):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None , unet=self.dummy_unet , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator , steps=4 )
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator , steps=4 , return_dict=False )
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        np.random.seed(0 )
        raw_audio = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(raw_audio=raw_audio , generator=generator , start_step=5 , steps=10 )
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_unet_condition , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        np.random.seed(0 )
        encoding = torch.rand((1, 1, 10) )
        output = pipe(generator=generator , encoding=encoding )
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_audio_diffusion( self ):
        '''simple docstring'''
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator )
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
from datetime import datetime
import requests
def download_video(url: str ) -> bytes:
    base_url = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
    video_src = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
    return requests.get(video_src ).content
if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser ):
    group = parser.add_argument_group('''quant_trainer arguments''' )
    group.add_argument('''--wprec''' , type=int , default=8 , help='''weight precision''' )
    group.add_argument('''--aprec''' , type=int , default=8 , help='''activation precision''' )
    group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
    group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
    group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
    group.add_argument('''--quant-disable-keyword''' , type=str , nargs='''+''' , help='''disable quantizers by keyword''' )
    group.add_argument('''--quant-disable-layer-module''' , type=str , help='''disable quantizers by keyword under layer.''' )
    group.add_argument('''--quant-enable-layer-module''' , type=str , help='''enable quantizers by keyword under layer''' )
    group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
    group.add_argument('''--percentile''' , default=None , type=float , help='''percentile for PercentileCalibrator''' )
    group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
    group.add_argument('''--clip-gelu''' , metavar='''N''' , type=float , help='''clip gelu output maximum value to N''' )
group.add_argument(
'''--recalibrate-weights''' , action='''store_true''' , help=(
'''recalibrate weight amaxes by taking the max of the weights.'''
''' amaxes will be computed with the current quantization granularity (axis).'''
) , )
def set_default_quantizers(args ):
    if args.calibrator == "max":
        calib_method = '''max'''
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('''Specify --percentile when using percentile calibrator''' )
        calib_method = '''histogram'''
    elif args.calibrator == "mse":
        calib_method = '''histogram'''
    else:
        raise ValueError(f"""Invalid calibrator {args.calibrator}""" )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def configure_model(model , args , calib=False , eval=False ):
    logger.info('''Configuring Model for Quantization''' )
    logger.info(f"""using quantization package {pytorch_quantization.__file__}""" )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ['''embeddings'''] , which='''weight''' , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [''''''] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def enable_calibration(model ):
logger.info('''Enabling Calibration''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f"""{name:80}: {module}""" )
def finish_calibration(model , args ):
logger.info('''Loading calibrated amax''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('''percentile''' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
    print_quant_summary(model )
def fuse_qkv(model , args ):
    def fusea(qq , qk , qv ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , '''_amax''' ):
                print(''' WARNING: NO AMAX BUFFER''' )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(f"""          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )
for name, mod in model.named_modules():
if name.endswith('''.attention.self''' ):
logger.info(f"""FUSE_QKV: {name:{name_width}}""" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu(model , maxval ):
    for name, mod in model.named_modules():
        if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )
def expand_amax(model ):
    for name, mod in model.named_modules():
        if hasattr(mod , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(f"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )
def recalibrate_weights(model ):
    for name, mod in model.named_modules():
        if hasattr(mod , '''_weight_quantizer''' ):
            if not hasattr(mod._weight_quantizer , '''_amax''' ):
                print(f"""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(f"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
            mod._weight_quantizer._amax = amax
def print_model_summary(model , name_width=25 , line_width=180 , ignore=None ):
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , '''weight''' ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , '''_input_quantizer''' , None )
        weight_q = getattr(mod , '''_weight_quantizer''' , None )
        if not hasattr(mod , '''weight''' ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = f"""Act:{input_q.extra_repr()}"""
        wgt_str = f"""Wgt:{weight_q.extra_repr()}"""
        s = f"""{name:{name_width}} {act_str} {wgt_str}"""
        if len(s ) <= line_width:
            logger.info(s )
        else:
            logger.info(f"""{name:{name_width}} {act_str}""" )
            logger.info(f"""{' ':{name_width}} {wgt_str}""" )
def print_quant_summary(model ):
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
            print(f"""{name:80} {mod}""" )
            count += 1
    print(f"""{count} TensorQuantizers found in model""" )
def set_quantizer(name , mod , quantizer , k , v ):
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(f"""{name} has no {quantizer}""" )
def set_quantizers(name , mod , which="both" , **kwargs ):
    s = f"""Warning: changing {which} quantizers of {name:{qname_width}}"""
    for k, v in kwargs.items():
        s += f""" {k}={v}"""
        if which in ["input", "both"]:
            set_quantizer(name , mod , '''_input_quantizer''' , k , v )
        if which in ["weight", "both"]:
            set_quantizer(name , mod , '''_weight_quantizer''' , k , v )
    logger.info(s )
def set_quantizer_by_name(model , names , **kwargs ):
    for name, mod in model.named_modules():
        if hasattr(mod , '''_input_quantizer''' ) or hasattr(mod , '''_weight_quantizer''' ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith('''_quantizer''' ):
            for n in names:
                if re.search(n , name ):
                    s = f"""Warning: changing {name:{name_width}}"""
                    for k, v in kwargs.items():
                        s += f""" {k}={v}"""
                        setattr(mod , k , v )
                    logger.info(s )
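A hedged sketch of how these helpers are typically wired together; `model` is a stand-in for a pytorch_quantization-instrumented network and is not defined in this file:

# Sketch only: typical call order for the quantization helpers above.
def calibration_sketch(model):
    import argparse

    parser = argparse.ArgumentParser()
    add_arguments(parser)                     # registers the --wprec/--aprec/... flags
    args = parser.parse_args(["--calibrator", "max"])
    set_default_quantizers(args)              # install default QuantDescriptors
    configure_model(model, args, calib=True)
    enable_calibration(model)                 # collect ranges instead of quantizing
    # ... run a few representative batches through `model` here ...
    finish_calibration(model, args)           # load amax values, re-enable quantization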
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType('DataClass', Any)
DataClassType = NewType('DataClassType', Any)
def string_to_bool(v ):
    '''simple docstring'''
    if isinstance(v , bool ):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
def make_choice_type_function(choices ):
    '''simple docstring'''
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg : str_to_choice.get(arg , arg )
def HfArg( *,
    aliases=None , help=None , default=dataclasses.MISSING , default_factory=dataclasses.MISSING , metadata=None , **kwargs , ):
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['''aliases'''] = aliases
    if help is not None:
        metadata['''help'''] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
class HfArgumentParser(ArgumentParser ):
    dataclass_types: Iterable[DataClassType]
    def __init__( self , dataclass_types , **kwargs ) -> None:
        """simple docstring"""
        if "formatter_class" not in kwargs:
            kwargs['''formatter_class'''] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
@staticmethod
    def _parse_dataclass_field(parser , field ) -> None:
        """simple docstring"""
        field_name = F'--{field.name}'
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                '''Unresolved type detected, which should have been done with the help of '''
                '''`typing.get_type_hints` method by default''' )
        aliases = kwargs.pop('''aliases''' , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]
        origin_type = getattr(field.type , '''__origin__''' , field.type )
        if origin_type is Union or (hasattr(types , '''UnionType''' ) and isinstance(field.type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
                    ''' the argument parser only supports one type per argument.'''
                    F' Problem encountered in field \'{field.name}\'.' )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , '''__origin__''' , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , '''__origin__''' , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs['''choices'''] = field.type.__args__
            else:
                kwargs['''choices'''] = [x.value for x in field.type]
            kwargs['''type'''] = make_choice_type_function(kwargs['''choices'''] )
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            else:
                kwargs['''required'''] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['''type'''] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['''default'''] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['''nargs'''] = '''?'''
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['''const'''] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs['''type'''] = field.type.__args__[0]
            kwargs['''nargs'''] = '''+'''
            if field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['''required'''] = True
        else:
            kwargs['''type'''] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            else:
                kwargs['''required'''] = True
        parser.add_argument(field_name , *aliases , **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['''default'''] = False
            parser.add_argument(F'--no_{field.name}' , action='''store_false''' , dest=field.name , **bool_kwargs )
    def _add_dataclass_arguments( self , dtype ) -> None:
        """simple docstring"""
        if hasattr(dtype , '''_argument_group_name''' ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
                '''removing line of `from __future__ import annotations` which opts in Postponed '''
                '''Evaluation of Annotations (PEP 563)''' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = '''.'''.join(map(str , sys.version_info[:3] ) )
                raise RuntimeError(
                    F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
                    '''line of `from __future__ import annotations` which opts in union types as '''
                    '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                    '''support Python versions lower than 3.10, you need to use '''
                    '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
                    '''`X | None`.''' ) from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
    def parse_args_into_dataclasses( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ) -> Tuple[DataClass, ...]:
        """simple docstring"""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action='''append''' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('''-''' ) , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
            return (*outputs,)
    def parse_dict( self , args , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
        """simple docstring"""
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}' )
        return tuple(outputs )
    def parse_json_file( self , json_file , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
        """simple docstring"""
        with open(Path(json_file ) , encoding='''utf-8''' ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file( self , yaml_file , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
        """simple docstring"""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
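A small usage sketch for the parser above; the dataclass is illustrative and not part of the original file:

# Sketch: parse CLI-style flags into a dataclass with HfArgumentParser.
@dataclasses.dataclass
class DemoArguments:
    learning_rate: float = 3e-4
    run_name: Optional[str] = None

demo_parser = HfArgumentParser(DemoArguments)
(demo_args,) = demo_parser.parse_args_into_dataclasses(args=['--learning_rate', '1e-3'])
print(demo_args.learning_rate)  # 0.001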
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
lowercase : int = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class DetaConfig(PretrainedConfig ):
    model_type = '''deta'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
    def __init__( self , backbone_config=None , num_queries=900 , max_position_embeddings=2048 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , num_feature_levels=5 , encoder_n_points=4 , decoder_n_points=4 , two_stage=True , two_stage_num_proposals=300 , with_box_refine=True , assign_first_stage=True , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , **kwargs , ):
"""simple docstring"""
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
            backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
        else:
            if isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.pop('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ) -> int:
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size( self ) -> int:
        """simple docstring"""
        return self.d_model
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
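A minimal instantiation sketch for the config above (values illustrative); the attribute_map means generic code can read hidden_size even though the value is stored as d_model:

# Sketch: attribute_map serves `hidden_size` from `d_model` on this config.
config = DetaConfig(num_queries=100, d_model=256)
print(config.num_queries)  # 100
print(config.hidden_size)  # 256, via the property above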
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = 'http://www.mocksite.com/file1.txt'
CONTENT = '"text": ["foo", "foo"]'
HASH = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class MockResponse:
    status_code = 200
    headers = {'''Content-Length''': '''100'''}
    cookies = {}
    def iter_content( self , **kwargs ):
        """simple docstring"""
        return [bytes(CONTENT , """utf-8""" )]
def mock_request(*args , **kwargs ):
    return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def test_download_manager_download(urls_type , tmp_path , monkeypatch ):
    import requests
    monkeypatch.setattr(requests , """request""" , mock_request )
    url = URL
    if issubclass(urls_type , str ):
        urls = url
    elif issubclass(urls_type , list ):
        urls = [url]
    elif issubclass(urls_type , dict ):
        urls = {"""train""": url}
    dataset_name = """dummy"""
    cache_subdir = """downloads"""
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root , cache_subdir ) , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    downloaded_paths = dl_manager.download(urls )
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls , str ):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls , dict ):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths , input_urls ):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path )
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(""".json""" )
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text() )
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def test_download_manager_extract(paths_type , xz_file , text_file ):
    filename = str(xz_file )
    if issubclass(paths_type , str ):
        paths = filename
    elif issubclass(paths_type , list ):
        paths = [filename]
    elif issubclass(paths_type , dict ):
        paths = {"""train""": filename}
    dataset_name = """dummy"""
    cache_dir = xz_file.parent
    extracted_subdir = """extracted"""
    download_config = DownloadConfig(
        cache_dir=cache_dir , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    extracted_paths = dl_manager.extract(paths )
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths , str ):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths , dict ):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths , input_paths ):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path )
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path , etag=None )
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path , file ):
    assert path.endswith(""".jsonl""" )
    for num_items, line in enumerate(file , start=1 ):
        item = json.loads(line.decode("""utf-8""" ) )
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def test_iter_archive_path(archive_jsonl , request ):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl )
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path ) , start=1 ):
        _test_jsonl(path , file )
    assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def test_iter_archive_file(archive_nested_jsonl , request ):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl )
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path ) , start=1 ):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file ) , start=1 ):
            _test_jsonl(subpath , subfile )
assert num_tar == 1
assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files ):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files ) , start=1 ):
        assert os.path.basename(file ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
def binary_insertion_sort(collection: list ) -> list:
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
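Note the binary search only trims the comparisons to O(log i) per insertion; the element shifts keep the sort O(n^2) in the worst case. A few quick checks against the function above:

assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
assert binary_insertion_sort([]) == []
assert binary_insertion_sort([7]) == [7]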
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments ):
    """simple docstring"""
    sortish_sampler: bool = field(default=False , metadata={"help": "Whether to use SortishSampler or not."} )
    predict_with_generate: bool = field(
        default=False , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    generation_max_length: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        } , )
    generation_num_beams: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        } , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        } , )
    def to_dict( self ):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
        return d
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int , name: str , config: LevitConfig , save_directory: Path , push_to_hub: bool = True ):
    print(F'''Converting {name}...''' )
    with torch.no_grad():
        if hidden_sizes == 1_28:
            if name[-1] == "S":
                from_model = timm.create_model("""levit_128s""" , pretrained=True )
            else:
                from_model = timm.create_model("""levit_128""" , pretrained=True )
        if hidden_sizes == 1_92:
            from_model = timm.create_model("""levit_192""" , pretrained=True )
        if hidden_sizes == 2_56:
            from_model = timm.create_model("""levit_256""" , pretrained=True )
        if hidden_sizes == 3_84:
            from_model = timm.create_model("""levit_384""" , pretrained=True )
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config ).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys() )
        new_keys = list(our_model.state_dict().keys() )
        print(len(og_keys ) , len(new_keys ) )
        for i in range(len(og_keys ) ):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights )
        x = torch.randn((2, 3, 2_24, 2_24) )
        out_a = from_model(x )
        out_b = our_model(x ).logits
        assert torch.allclose(out_a , out_b ), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(F'''Pushed {checkpoint_name}''' )
def convert_weights_and_push(save_directory: Path , model_name: str = None , push_to_hub: bool = True ):
    filename = """imagenet-1k-id2label.json"""
    num_labels = 10_00
    expected_shape = (1, num_labels)
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_hidden_sizes = {
        """levit-128S""": 1_28,
        """levit-128""": 1_28,
        """levit-192""": 1_92,
        """levit-256""": 2_56,
        """levit-384""": 3_84,
    }
    names_to_config = {
        """levit-128S""": ImageNetPreTrainedConfig(
            hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        """levit-128""": ImageNetPreTrainedConfig(
            hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        """levit-192""": ImageNetPreTrainedConfig(
            hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        """levit-256""": ImageNetPreTrainedConfig(
            hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        """levit-384""": ImageNetPreTrainedConfig(
            hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
    }
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""simple docstring"""
from __future__ import annotations
def prime_factors(n: int ) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
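A few quick checks of the factorization above:

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prime_factors(97) == [97]  # a prime factors as itself
assert prime_factors(1) == []     # 1 has no prime factors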
"""simple docstring"""
def nor_gate(input_a: int , input_b: int ) -> int:
    return int(input_a == input_b == 0 )
def main() -> None:
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(f'| 0 | 0 | {nor_gate(0 , 0 )} |' )
print(f'| 0 | 1 | {nor_gate(0 , 1 )} |' )
print(f'| 1 | 0 | {nor_gate(1 , 0 )} |' )
print(f'| 1 | 1 | {nor_gate(1 , 1 )} |' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
def naive_pattern_search(s: str , pattern: str ) -> list:
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
from string import ascii_uppercase
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))
def generate_key(message: str , key: str ) -> str:
    x = len(message )
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key ) == len(message ):
            break
        key += key[i]
        i += 1
    return key
def cipher_text(message: str , key_new: str ) -> str:
    encrypted = ''''''
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            encrypted += dictb[x]
    return encrypted
def original_text(cipher_txt: str , key_new: str ) -> str:
    or_txt = ''''''
    i = 0
    for letter in cipher_txt:
        if letter == " ":
            or_txt += " "
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt
def main() -> None:
    message = '''THE GERMAN ATTACK'''
    key = '''SECRET'''
    key_new = generate_key(message , key )
    s = cipher_text(message , key_new )
    print(f"""Encrypted Text = {s}""" )
    print(f"""Original Text = {original_text(s , key_new )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
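A round-trip check for the cipher above: decrypting with the same generated key recovers the plaintext, since (m - k) + k is congruent to m modulo 26.

# Sketch: encrypt-then-decrypt returns the original message.
demo_message = 'ATTACK AT DAWN'
demo_key = generate_key(demo_message, 'LEMON')
assert original_text(cipher_text(demo_message, demo_key), demo_key) == demo_message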
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer ):
    '''simple docstring'''
    def __init__( self , vocab_size , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False , **kwargs ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_projs = []
        self.out_layers = []
    def build( self , input_shape ):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=True , name="cluster_weight" )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,) , initializer="zeros" , trainable=True , name="cluster_bias" )
        if self.div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=True , name=F"""out_projs_._{i}""" , )
                    self.out_projs.append(weight )
                else:
                    self.out_projs.append(None )
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=True , name=F"""out_layers_._{i}_._weight""" , )
                bias = self.add_weight(
                    shape=(self.vocab_size,) , initializer="zeros" , trainable=True , name=F"""out_layers_._{i}_._bias""" , )
                self.out_layers.append((weight, bias) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=True , name=F"""out_projs_._{i}""" )
                self.out_projs.append(proj )
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i) , initializer="zeros" , trainable=True , name=F"""out_layers_._{i}_._weight""" , )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,) , initializer="zeros" , trainable=True , name=F"""out_layers_._{i}_._bias""" , )
                self.out_layers.append((weight, bias) )
        super().build(input_shape )
    @staticmethod
    def _logit(x , W , b , proj=None ):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe" , y , proj )
        return tf.einsum("ibd,nd->ibn" , y , W ) + b
    @staticmethod
    def _gather_logprob(logprob , target ):
        lp_size = shape_list(logprob )
        r = tf.range(lp_size[0] , dtype=target.dtype )
        idx = tf.stack([r, target] , 1 )
        return tf.gather_nd(logprob , idx )
def call( self , hidden , target , return_mean=True , training=False ):
    head_logprob = 0
    if self.n_clusters == 0:
        output = self._logit(hidden , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
        if target is not None:
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target , logits=output )
        out = tf.nn.log_softmax(output , axis=-1 )
    else:
        hidden_sizes = shape_list(hidden )
        out = []
        loss = tf.zeros(hidden_sizes[:2] )
        for i in range(len(self.cutoffs ) ):
            l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
            if target is not None:
                mask = (target >= l_idx) & (target < r_idx)
                mask_idx = tf.where(mask )
                cur_target = tf.boolean_mask(target , mask ) - l_idx
            if self.div_val == 1:
                cur_W = self.out_layers[0][0][l_idx:r_idx]
                cur_b = self.out_layers[0][1][l_idx:r_idx]
            else:
                cur_W = self.out_layers[i][0]
                cur_b = self.out_layers[i][1]
            if i == 0:
                cur_W = tf.concat([cur_W, self.cluster_weight] , 0 )
                cur_b = tf.concat([cur_b, self.cluster_bias] , 0 )
                head_logit = self._logit(hidden , cur_W , cur_b , self.out_projs[0] )
                head_logprob = tf.nn.log_softmax(head_logit )
                out.append(head_logprob[..., : self.cutoffs[0]] )
                if target is not None:
                    cur_head_logprob = tf.boolean_mask(head_logprob , mask )
                    cur_logprob = self._gather_logprob(cur_head_logprob , cur_target )
            else:
                tail_logit = self._logit(hidden , cur_W , cur_b , self.out_projs[i] )
                tail_logprob = tf.nn.log_softmax(tail_logit )
                cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                out.append(logprob_i )
                if target is not None:
                    cur_head_logprob = tf.boolean_mask(head_logprob , mask )
                    cur_tail_logprob = tf.boolean_mask(tail_logprob , mask )
                    cur_logprob = self._gather_logprob(cur_tail_logprob , cur_target )
                    cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
            if target is not None:
                loss += tf.scatter_nd(mask_idx , -cur_logprob , shape_list(loss ) )
        out = tf.concat(out , axis=-1 )
    if target is not None:
        if return_mean:
            loss = tf.reduce_mean(loss )
        # Add the training-time loss value to the layer using `self.add_loss()`.
        self.add_loss(loss )
        # Log the loss as a metric (we could log arbitrary metrics,
        # including different metrics for training and inference).
        self.add_metric(loss , name=self.name , aggregation="mean" if return_mean else "" )
    return out
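# --- Editor's illustration (hedged; not part of the original file) ---
# A minimal sketch of the gather trick used by `_gather_logprob` above: for a
# log-probability matrix of shape [length, vocab] and integer targets of shape
# [length], stack (row, target[row]) index pairs and pick one entry per row:
#   logprob = tf.math.log([[0.5, 0.5], [0.9, 0.1]])
#   target = tf.constant([1, 0], dtype=tf.int64)
#   idx = tf.stack([tf.range(2, dtype=target.dtype), target], 1)
#   tf.gather_nd(logprob, idx)  # -> [log(0.5), log(0.9)]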
| 714
|
"""simple docstring"""
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
A__ : Optional[Any] = logging.get_logger(__name__)
A__ : Tuple = 'T5Config'
def _snake_case ( input_ids: jnp.array , pad_token_id: int , decoder_start_token_id: int ) -> jnp.ndarray:
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
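# Editor's worked example (hedged): with pad_token_id=0 and
# decoder_start_token_id=2, the row [5, -100, 6] becomes [2, 5, -100] after
# the right shift, and the masked -100 is then replaced by the pad id:
#   _snake_case(jnp.array([[5, -100, 6]]), 0, 2)  # -> [[2, 5, 0]]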
class FlaxMTaModel ( FlaxTaModel ):
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaEncoderModel ( FlaxTaEncoderModel ):
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration ( FlaxTaForConditionalGeneration ):
    model_type = "mt5"
    config_class = MTaConfig
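# Editor's note (hedged): these wrappers reuse the Flax T5 model, encoder, and
# conditional-generation implementations unchanged and only swap in `MTaConfig`
# (mT5 shares T5's architecture; it differs in vocabulary and training data),
# together with the shift-right helper above for building decoder inputs.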
| 244
| 0
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs ( mockfs ):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_non_mockfs ( ):
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_extract_path_from_uri ( ):
    mock_bucket = '''mock-s3-bucket'''
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path.startswith('''s3://''' ) is False
    dataset_path = '''./local/path'''
    new_dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem ( mockfs ):
    is_remote = is_remote_filesystem(mockfs )
    assert is_remote is True
    fs = fsspec.filesystem('''file''' )
    is_remote = is_remote_filesystem(fs )
    assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , COMPRESSION_FILESYSTEMS )
def test_compression_filesystems ( compression_fs_class , gz_file , bza_file , lza_file , zstd_file , xz_file , text_file ):
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    fs = fsspec.filesystem(compression_fs_class.protocol , fo=input_path )
    assert isinstance(fs , compression_fs_class )
    expected_filename = os.path.basename(input_path )
    expected_filename = expected_filename[: expected_filename.rindex('''.''' )]
    assert fs.glob('''*''' ) == [expected_filename]
    with fs.open(expected_filename , '''r''' , encoding='''utf-8''' ) as f, open(text_file , encoding='''utf-8''' ) as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def test_fs_isfile ( protocol , zip_jsonl_path , jsonl_gz_path ):
    compressed_file_paths = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = '''dataset.jsonl'''
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs , *_ = fsspec.get_fs_token_paths(path )
    assert fs.isfile(member_file_path )
    assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def test_hf_filesystem ( hf_token , hf_api , hf_private_dataset_repo_txt_data , text_file ):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data , token=hf_token )
    hffs = HfFileSystem(repo_info=repo_info , token=hf_token )
    assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
    assert hffs.isdir('''data''' )
    assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
    with open(text_file ) as f:
        assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def test_fs_overwrites ( ):
    protocol = '''bz2'''
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol , None , clobber=True )
    with pytest.warns(UserWarning ) as warning_info:
        importlib.reload(datasets.filesystems )
    assert len(warning_info ) == 1
assert (
str(warning_info[0].message )
== f"A filesystem protocol was already set for {protocol} and will be overwritten."
)
| 487
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def __snake_case ( ) -> Any:
_a , _a = 9, 14 # noqa: F841
edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
adjancency = defaultdict(list )
for nodea, nodeb, cost in edges:
    adjancency[nodea].append([nodeb, cost] )
    adjancency[nodeb].append([nodea, cost] )
result = mst(adjancency )
expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
edge = tuple(answer[:2] )
reverse = tuple(edge[::-1] )
assert edge in result or reverse in result
| 487
| 1
|
from numpy import exp, pi, sqrt
def UpperCamelCase_ ( x , mu = 0.0 , sigma = 1.0 ) -> float:
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
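# Quick sanity check (editor's addition): the standard normal density at the
# mean is 1 / sqrt(2 * pi) ~= 0.3989, so UpperCamelCase_(0.0) with the default
# mu=0.0 and sigma=1.0 returns roughly 0.3989.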
if __name__ == "__main__":
import doctest
doctest.testmod()
| 151
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Dict = logging.get_logger(__name__)
def UpperCamelCase_ ( __a , __a , __a ) -> Tuple:
a__ : Union[str, Any] = os.path.abspath(__a )
logger.info(f'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
a__ : str = tf.train.list_variables(__a )
a__ : Tuple = []
a__ : Any = []
a__ : List[str] = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
a__ : Optional[Any] = full_name.split("/" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(f'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
a__ : Tuple = name[1:]
# figure out how many levels deep the name is
a__ : Tuple = 0
for _name in name:
if _name.startswith("layer_with_weights" ):
depth += 1
else:
break
layer_depth.append(__a )
# read data
a__ : str = tf.train.load_variable(__a , __a )
names.append("/".join(__a ) )
arrays.append(__a )
logger.info(f'''Read a total of {len(__a ):,} layers''' )
# Sanity check
if len(set(__a ) ) != 1:
raise ValueError(f'''Found layer names with different depths (layer depth {list(set(__a ) )})''' )
a__ : int = list(set(__a ) )[0]
if layer_depth != 1:
raise ValueError(
"The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
" heads." )
# convert layers
logger.info("Converting weights..." )
for full_name, array in zip(__a , __a ):
a__ : str = full_name.split("/" )
a__ : str = model
a__ : List[Any] = []
for i, m_name in enumerate(__a ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("layer_with_weights" ):
a__ : int = int(m_name.split("-" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["embeddings", "LayerNorm"] )
a__ : int = getattr(__a , "embeddings" )
a__ : Optional[Any] = getattr(__a , "LayerNorm" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["encoder", "layer", str(layer_num - 4 )] )
a__ : str = getattr(__a , "encoder" )
a__ : str = getattr(__a , "layer" )
a__ : List[Any] = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["pooler", "dense"] )
a__ : Any = getattr(__a , "pooler" )
a__ : List[str] = getattr(__a , "dense" )
elif m_name == "embeddings":
trace.append("embeddings" )
a__ : List[Any] = getattr(__a , "embeddings" )
if layer_num == 0:
trace.append("word_embeddings" )
a__ : Optional[Any] = getattr(__a , "word_embeddings" )
elif layer_num == 1:
trace.append("position_embeddings" )
a__ : int = getattr(__a , "position_embeddings" )
elif layer_num == 2:
trace.append("token_type_embeddings" )
a__ : Tuple = getattr(__a , "token_type_embeddings" )
else:
raise ValueError(f'''Unknown embedding layer with name {full_name}''' )
trace.append("weight" )
a__ : Dict = getattr(__a , "weight" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["attention", "self"] )
a__ : str = getattr(__a , "attention" )
a__ : str = getattr(__a , "self" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["attention", "output", "LayerNorm"] )
a__ : Tuple = getattr(__a , "attention" )
a__ : Any = getattr(__a , "output" )
a__ : Tuple = getattr(__a , "LayerNorm" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["attention", "output", "dense"] )
a__ : Optional[Any] = getattr(__a , "attention" )
a__ : Tuple = getattr(__a , "output" )
a__ : Dict = getattr(__a , "dense" )
elif m_name == "_output_dense":
# output dense
trace.extend(["output", "dense"] )
a__ : Any = getattr(__a , "output" )
a__ : List[Any] = getattr(__a , "dense" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["output", "LayerNorm"] )
a__ : Any = getattr(__a , "output" )
a__ : Optional[int] = getattr(__a , "LayerNorm" )
elif m_name == "_key_dense":
# attention key
trace.append("key" )
a__ : int = getattr(__a , "key" )
elif m_name == "_query_dense":
# attention query
trace.append("query" )
a__ : Optional[Any] = getattr(__a , "query" )
elif m_name == "_value_dense":
# attention value
trace.append("value" )
a__ : Dict = getattr(__a , "value" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["intermediate", "dense"] )
a__ : Union[str, Any] = getattr(__a , "intermediate" )
a__ : List[Any] = getattr(__a , "dense" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("output" )
a__ : int = getattr(__a , "output" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("bias" )
a__ : List[Any] = getattr(__a , "bias" )
elif m_name in ["kernel", "gamma"]:
trace.append("weight" )
a__ : Optional[int] = getattr(__a , "weight" )
else:
logger.warning(f'''Ignored {m_name}''' )
# for certain layers reshape is necessary
a__ : List[str] = ".".join(__a )
if re.match(R"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)" , __a ) or re.match(
R"(\S+)\.attention\.output\.dense\.weight" , __a ):
a__ : List[Any] = array.reshape(pointer.data.shape )
if "kernel" in full_name:
a__ : List[str] = array.transpose()
if pointer.shape == array.shape:
a__ : Dict = torch.from_numpy(__a )
else:
raise ValueError(
f'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
f''' {array.shape}''' )
logger.info(f'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def UpperCamelCase_ ( __a , __a , __a ) -> Optional[int]:
# Instantiate model
logger.info(f'''Loading model based on config from {config_path}...''' )
a__ : Union[str, Any] = BertConfig.from_json_file(__a )
a__ : Optional[int] = BertModel(__a )
# Load weights from checkpoint
logger.info(f'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(__a , __a , __a )
# Save pytorch-model
logger.info(f'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict() , __a )
if __name__ == "__main__":
UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
UpperCamelCase : int = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 151
| 1
|
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__A : Dict = random.Random()
def floats_list ( shape , scale=1.0 , rng=None , name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
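# Editor's note: `floats_list((batch, length))` builds a `batch x length`
# nested Python list of floats drawn uniformly from [0, scale), seeded by the
# module-level `global_rng` unless an explicit `rng` is passed.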
class WavaVecaFeatureExtractionTester (unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=4_0_0 , max_seq_length=2_0_0_0 , feature_size=1 , padding_value=0.0 , sampling_rate=1_6_0_0_0 , return_attention_mask=True , do_normalize=True , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict ( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common ( self , equal_length=False , numpify=False ):
        '''simple docstring'''
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
class A_ (SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = WavaVecaFeatureExtractor
    def setUp ( self ):
        '''simple docstring'''
        self.feat_extract_tester = WavaVecaFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance ( self , input_vector ):
        '''simple docstring'''
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1E-3 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test batched
UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
UpperCAmelCase = np.asarray(_A )
UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
UpperCAmelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(_A , _A ):
UpperCAmelCase = feat_extract(_A , padding=_A , max_length=_A , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = range(8_0_0 , 1_4_0_0 , 2_0_0 )
UpperCAmelCase = [floats_list((1, x) )[0] for x in lengths]
UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
UpperCAmelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(_A , _A ):
UpperCAmelCase = feat_extract(_A , max_length=_A , padding=_A )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = feat_extract(
_A , truncation=_A , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = feat_extract(
_A , truncation=_A , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = feat_extract(
_A , truncation=_A , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
@require_torch
def _lowercase ( self ):
'''simple docstring'''
import torch
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def _lowercase ( self ):
'''simple docstring'''
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
UpperCAmelCase = WavaVecaConfig.from_pretrained(_A )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(_A )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
| 130
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
__A : int = None
__A : int = logging.get_logger(__name__)
__A : Any = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
__A : List[str] = "▁"
# Segments (not really needed)
__A : Tuple = 0
__A : str = 1
__A : Any = 2
__A : Dict = 3
__A : Any = 4
class A_ (PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = '''left'''
    slow_tokenizer_class = XLNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , **kwargs , ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens ( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_b + sep + cls
    def create_token_type_ids_from_sequences ( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_b is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1] + cls_segment_id
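    # Editor's note (hedged): XLNet places special tokens at the end, so a
    # single sequence is encoded as `A <sep> <cls>` and a pair as
    # `A <sep> B <sep> <cls>`; the token type ids above mark segment A as 0,
    # segment B as 1, and the trailing <cls> as segment 2 (`cls_segment_id`).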
    def save_vocabulary ( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 130
| 1
|
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def lowercase_ ( *args , take_from = None , standard_warn=True , stacklevel=2 ):
    '''simple docstring'''
    from .. import __version__
    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                F'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
                F' version {__version__} is >= {version_name}' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = F'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = F'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
        elif deprecated_kwargs is None:
            warning = F'`{attribute}` is deprecated and will be removed in version {version_name}.'
        if warning is not None:
            warning = warning + ''' ''' if standard_warn else ''''''
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key , value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(F'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
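# Hedged usage sketch (editor's addition; names are illustrative, not from the
# original file): popping a deprecated kwarg while emitting a FutureWarning --
#   kwargs = {"old_arg": 1}
#   value = lowercase_(("old_arg", "99.0.0", "Use `new_arg` instead."), take_from=kwargs)
# With take_from=None the call only warns:
#   lowercase_(("old_attr", "99.0.0", "The attribute was removed."))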
| 702
|
from itertools import count
def solution ( min_block_length = 50 ):
    '''simple docstring'''
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        for block_length in range(min_block_length , n + 1 ):
            for block_start in range(n - block_length ):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_00_00_00:
            break
    return n
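# Editor's note: `fill_count_functions[n]` counts the ways to fill a row of
# length n with blocks of length >= min_block_length, each separated by at
# least one empty cell (Project Euler problem 115); `solution()` returns the
# least n for which this fill count first exceeds one million.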
if __name__ == "__main__":
print(f'''{solution() = }''')
| 527
| 0
|
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 5_0 # max width of layer names
qname_width = 7_0 # max width of quantizer names
def add_arguments ( parser ):
    group = parser.add_argument_group("""quant_trainer arguments""" )
    group.add_argument("""--wprec""", type=int, default=8, help="""weight precision""" )
    group.add_argument("""--aprec""", type=int, default=8, help="""activation precision""" )
    group.add_argument("""--quant-per-tensor""", action="""store_true""", help="""per tensor weight scaling""" )
    group.add_argument("""--quant-disable""", action="""store_true""", help="""disable all quantizers""" )
    group.add_argument("""--quant-disable-embeddings""", action="""store_true""", help="""disable all embeddings quantizers""" )
    group.add_argument("""--quant-disable-keyword""", type=str, nargs="""+""", help="""disable quantizers by keyword""" )
    group.add_argument("""--quant-disable-layer-module""", type=str, help="""disable quantizers by keyword under layer.""" )
    group.add_argument("""--quant-enable-layer-module""", type=str, help="""enable quantizers by keyword under layer""" )
    group.add_argument("""--calibrator""", default="""max""", help="""which quantization range calibrator to use""" )
    group.add_argument("""--percentile""", default=None, type=float, help="""percentile for PercentileCalibrator""" )
    group.add_argument("""--fuse-qkv""", action="""store_true""", help="""use the same scale factor for qkv""" )
    group.add_argument("""--clip-gelu""", metavar="""N""", type=float, help="""clip gelu output maximum value to N""" )
group.add_argument(
"""--recalibrate-weights""", action="""store_true""", help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
), )
def set_default_quantizers ( args ):
    if args.calibrator == "max":
        calib_method = """max"""
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("""Specify --percentile when using percentile calibrator""" )
        calib_method = """histogram"""
    elif args.calibrator == "mse":
        calib_method = """histogram"""
    else:
        raise ValueError(F'''Invalid calibrator {args.calibrator}''' )
    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def configure_model ( model, args, calib=False, eval=False ):
    logger.info("""Configuring Model for Quantization""" )
    logger.info(F'''using quantization package {pytorch_quantization.__file__}''' )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["""embeddings"""], which="""weight""", _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model, [""""""], _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [R"""layer.\d+.""" + args.quant_disable_layer_module], _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [R"""layer.\d+.""" + args.quant_enable_layer_module], _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model, args )
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu )
        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(model )
def enable_calibration ( model ):
logger.info("""Enabling Calibration""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F'''{name:80}: {module}''' )
def finish_calibration ( model, args ):
logger.info("""Loading calibrated amax""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
if isinstance(module._calibrator, calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""", percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
    print_quant_summary(model )
def fuse_qkv ( model, args ):
    def fusea(qq, qk, qv ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, """_amax""" ):
                print(""" WARNING: NO AMAX BUFFER""" )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q, k, v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith(""".attention.self""" ):
logger.info(F'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer )
def clip_gelu ( model, maxval ):
    for name, mod in model.named_modules():
        if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def expand_amax ( model ):
    for name, mod in model.named_modules():
        if hasattr(mod, """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device ) * amax
            print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def recalibrate_weights ( model ):
    for name, mod in model.named_modules():
        if hasattr(mod, """_weight_quantizer""" ):
            if not hasattr(mod.weight_quantizer, """_amax""" ):
                print(F"""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True ).detach()
            logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
            mod._weight_quantizer._amax = amax
def print_model_summary ( model, name_width=25, line_width=180, ignore=None ):
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list ):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, """weight""" ):
            continue
        name_width = max(name_width, len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod, """_input_quantizer""", None )
        weight_q = getattr(mod, """_weight_quantizer""", None )
        if not hasattr(mod, """weight""" ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = F'''Act:{input_q.extra_repr()}'''
        wgt_str = F'''Wgt:{weight_q.extra_repr()}'''
        s = F'''{name:{name_width}} {act_str} {wgt_str}'''
        if len(s ) <= line_width:
            logger.info(s )
        else:
            logger.info(F'''{name:{name_width}} {act_str}''' )
            logger.info(F'''{" ":{name_width}} {wgt_str}''' )
def print_quant_summary ( model ):
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer ):
            print(F'''{name:80} {mod}''' )
            count += 1
    print(F'''{count} TensorQuantizers found in model''' )
def set_quantizer ( name, mod, quantizer, k, v ):
    quantizer_mod = getattr(mod, quantizer, None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k )
        setattr(quantizer_mod, k, v )
    else:
        logger.warning(F'''{name} has no {quantizer}''' )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase="both", **lowerCamelCase ):
lowerCamelCase : int = F'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += F''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(lowerCamelCase, lowerCamelCase, """_input_quantizer""", lowerCamelCase, lowerCamelCase )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase, lowerCamelCase, """_weight_quantizer""", lowerCamelCase, lowerCamelCase )
logger.info(lowerCamelCase )
def set_quantizer_by_name ( model, names, **kwargs ):
    for name, mod in model.named_modules():
        if hasattr(mod, """_input_quantizer""" ) or hasattr(mod, """_weight_quantizer""" ):
            for n in names:
                if re.search(n, name ):
                    set_quantizers(name, mod, **kwargs )
        elif name.endswith("""_quantizer""" ):
            for n in names:
                if re.search(n, name ):
                    s = F'''Warning: changing {name:{name_width}}'''
                    for k, v in kwargs.items():
                        s += F''' {k}={v}'''
                        setattr(mod, k, v )
                    logger.info(s )
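# Hedged usage sketch (editor's addition; `model` is an illustrative
# QDQBERT-style module, not defined in this file):
#   parser = argparse.ArgumentParser()
#   add_arguments(parser)
#   args = parser.parse_args(["--aprec", "8", "--wprec", "8"])
#   set_default_quantizers(args)   # pick calibrators and QuantDescriptors
#   configure_model(model, args)   # enable/disable quantizers by name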
| 681
|
import pytest
_lowerCamelCase ="""__dummy_dataset1__"""
_lowerCamelCase ="""
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name ( ):
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code ( ):
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir ( dataset_loading_script_name , dataset_loading_script_code , tmp_path ):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / F'''{script_name}.py'''
    with open(script_path , """w""" ) as f:
        f.write(dataset_loading_script_code )
    return str(script_path )
| 681
| 1
|
def lucas_lehmer_test ( p ) -> bool:
    if p < 2:
        raise ValueError("""p should not be less than 2!""" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
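# Example (editor's addition): p = 7 gives 2**7 - 1 = 127, a Mersenne prime,
# so the test returns True; p = 11 gives 2**11 - 1 = 2047 = 23 * 89, which is
# composite, so it returns False.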
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 711
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _A ( BaseOutput ):
    sample: torch.FloatTensor
class _A ( nn.Module ):
def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",) , _SCREAMING_SNAKE_CASE=(64,) , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE="silu" , _SCREAMING_SNAKE_CASE=True , ):
super().__init__()
_UpperCAmelCase = layers_per_block
_UpperCAmelCase = torch.nn.Convad(
_SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCAmelCase = None
_UpperCAmelCase = nn.ModuleList([] )
# down
_UpperCAmelCase = block_out_channels[0]
for i, down_block_type in enumerate(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = output_channel
_UpperCAmelCase = block_out_channels[i]
_UpperCAmelCase = i == len(_SCREAMING_SNAKE_CASE ) - 1
_UpperCAmelCase = get_down_block(
_SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=_SCREAMING_SNAKE_CASE , resnet_groups=_SCREAMING_SNAKE_CASE , attention_head_dim=_SCREAMING_SNAKE_CASE , temb_channels=_SCREAMING_SNAKE_CASE , )
self.down_blocks.append(_SCREAMING_SNAKE_CASE )
# mid
_UpperCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=_SCREAMING_SNAKE_CASE , temb_channels=_SCREAMING_SNAKE_CASE , )
# out
_UpperCAmelCase = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_SCREAMING_SNAKE_CASE , eps=1e-6 )
_UpperCAmelCase = nn.SiLU()
_UpperCAmelCase = 2 * out_channels if double_z else out_channels
_UpperCAmelCase = nn.Convad(block_out_channels[-1] , _SCREAMING_SNAKE_CASE , 3 , padding=1 )
_UpperCAmelCase = False
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = x
_UpperCAmelCase = self.conv_in(_SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(_SCREAMING_SNAKE_CASE ):
def custom_forward(*_SCREAMING_SNAKE_CASE ):
return module(*_SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
_UpperCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , use_reentrant=_SCREAMING_SNAKE_CASE )
# middle
_UpperCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _SCREAMING_SNAKE_CASE , use_reentrant=_SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
_UpperCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# middle
_UpperCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , _SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
_UpperCAmelCase = down_block(_SCREAMING_SNAKE_CASE )
# middle
_UpperCAmelCase = self.mid_block(_SCREAMING_SNAKE_CASE )
# post-process
_UpperCAmelCase = self.conv_norm_out(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.conv_act(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.conv_out(_SCREAMING_SNAKE_CASE )
return sample
class _A ( nn.Module ):
def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",) , _SCREAMING_SNAKE_CASE=(64,) , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE="silu" , _SCREAMING_SNAKE_CASE="group" , ):
super().__init__()
_UpperCAmelCase = layers_per_block
_UpperCAmelCase = nn.Convad(
_SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCAmelCase = None
_UpperCAmelCase = nn.ModuleList([] )
_UpperCAmelCase = in_channels if norm_type == """spatial""" else None
# mid
_UpperCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_SCREAMING_SNAKE_CASE , temb_channels=_SCREAMING_SNAKE_CASE , )
# up
_UpperCAmelCase = list(reversed(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = output_channel
_UpperCAmelCase = reversed_block_out_channels[i]
_UpperCAmelCase = i == len(_SCREAMING_SNAKE_CASE ) - 1
_UpperCAmelCase = get_up_block(
_SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , prev_output_channel=_SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=_SCREAMING_SNAKE_CASE , resnet_groups=_SCREAMING_SNAKE_CASE , attention_head_dim=_SCREAMING_SNAKE_CASE , temb_channels=_SCREAMING_SNAKE_CASE , resnet_time_scale_shift=_SCREAMING_SNAKE_CASE , )
self.up_blocks.append(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = output_channel
# out
if norm_type == "spatial":
_UpperCAmelCase = SpatialNorm(block_out_channels[0] , _SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_SCREAMING_SNAKE_CASE , eps=1e-6 )
_UpperCAmelCase = nn.SiLU()
_UpperCAmelCase = nn.Convad(block_out_channels[0] , _SCREAMING_SNAKE_CASE , 3 , padding=1 )
_UpperCAmelCase = False
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
_UpperCAmelCase = z
_UpperCAmelCase = self.conv_in(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(_SCREAMING_SNAKE_CASE ):
def custom_forward(*_SCREAMING_SNAKE_CASE ):
return module(*_SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
_UpperCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , use_reentrant=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = sample.to(_SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
_UpperCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , use_reentrant=_SCREAMING_SNAKE_CASE )
else:
# middle
_UpperCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = sample.to(_SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
_UpperCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# middle
_UpperCAmelCase = self.mid_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = sample.to(_SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
_UpperCAmelCase = up_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
_UpperCAmelCase = self.conv_norm_out(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = self.conv_norm_out(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.conv_act(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.conv_out(_SCREAMING_SNAKE_CASE )
return sample
class _A ( nn.Module ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="random" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True ):
super().__init__()
_UpperCAmelCase = n_e
_UpperCAmelCase = vq_embed_dim
_UpperCAmelCase = beta
_UpperCAmelCase = legacy
_UpperCAmelCase = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_UpperCAmelCase = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
_UpperCAmelCase = self.used.shape[0]
_UpperCAmelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCAmelCase = self.re_embed
_UpperCAmelCase = self.re_embed + 1
print(
F"Remapping {self.n_e} indices to {self.re_embed} indices. "
F"Using {self.unknown_index} for unknown indices." )
else:
_UpperCAmelCase = n_e
_UpperCAmelCase = sane_index_shape
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = inds.shape
assert len(_SCREAMING_SNAKE_CASE ) > 1
_UpperCAmelCase = inds.reshape(ishape[0] , -1 )
_UpperCAmelCase = self.used.to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCAmelCase = match.argmax(-1 )
_UpperCAmelCase = match.sum(2 ) < 1
if self.unknown_index == "random":
_UpperCAmelCase = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_UpperCAmelCase = self.unknown_index
return new.reshape(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = inds.shape
assert len(_SCREAMING_SNAKE_CASE ) > 1
_UpperCAmelCase = inds.reshape(ishape[0] , -1 )
_UpperCAmelCase = self.used.to(_SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
_UpperCAmelCase = 0 # simply set to zero
_UpperCAmelCase = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _SCREAMING_SNAKE_CASE )
return back.reshape(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
# reshape z -> (batch, height, width, channel) and flatten
_UpperCAmelCase = z.permute(0 , 2 , 3 , 1 ).contiguous()
_UpperCAmelCase = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_UpperCAmelCase = torch.argmin(torch.cdist(_SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
_UpperCAmelCase = self.embedding(_SCREAMING_SNAKE_CASE ).view(z.shape )
_UpperCAmelCase = None
_UpperCAmelCase = None
# compute loss for embedding
if not self.legacy:
_UpperCAmelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_UpperCAmelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_UpperCAmelCase = z + (z_q - z).detach()
# reshape back to match original input shape
_UpperCAmelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_UpperCAmelCase = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_UpperCAmelCase = self.remap_to_used(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_UpperCAmelCase = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_UpperCAmelCase = indices.reshape(shape[0] , -1 ) # add batch axis
_UpperCAmelCase = self.unmap_to_all(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_UpperCAmelCase = self.embedding(_SCREAMING_SNAKE_CASE )
if shape is not None:
_UpperCAmelCase = z_q.view(_SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
_UpperCAmelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class _A ( object ):
    def __init__( self , parameters , deterministic=False ):
        self.parameters = parameters
        self.mean , self.logvar = torch.chunk(parameters , 2 , dim=1 )
        self.logvar = torch.clamp(self.logvar , -30.0 , 20.0 )
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar )
        self.var = torch.exp(self.logvar )
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
    def sample ( self , generator = None ):
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape , generator=generator , device=self.parameters.device , dtype=self.parameters.dtype )
        x = self.mean + self.std * sample
        return x
    def kl ( self , other=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
    def nll ( self , sample , dims=[1, 2, 3] ):
        if self.deterministic:
            return torch.Tensor([0.0] )
        logtwopi = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=dims )
    def mode ( self ):
return self.mean
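# Editor's note (hedged): the class above models a diagonal Gaussian posterior
# N(mean, diag(var)) with logvar clamped to [-30, 20]. Sampling uses the
# reparameterization trick z = mean + std * eps; the KL term against N(0, I)
# reduces to 0.5 * sum(mean^2 + var - 1 - logvar); and in deterministic mode
# var is zeroed so every sample equals the mean.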
| 175
| 0
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = '''sshleifer/bart-tiny-random'''
TINY_T5 = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
@cached_property
    def teacher_config ( self ):
        return AutoConfig.from_pretrained(TINY_BART )
    def test_valid_t5 ( self ):
        student , *_lowerCAmelCase = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )
    def test_asymmetric_t5 ( self ):
        student , *_lowerCAmelCase = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=None )
    def test_same_decoder_small_encoder ( self ):
        student , *_lowerCAmelCase = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=None )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
    def test_small_enc_small_dec ( self ):
        student , *_lowerCAmelCase = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )
    def test_raises_assert ( self ):
        with self.assertRaises(AssertionError ):
            create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=None , d=None )
| 192
|
import re
def _lowerCamelCase ( snake_case ):
if len(re.findall('[ATCG]' , snake_case ) ) != len(snake_case ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 192
| 1
|
'''simple docstring'''
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
SCREAMING_SNAKE_CASE = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def tpu_command_parser ( subparsers=None ):
"""simple docstring"""
if subparsers is not None:
        parser = subparsers.add_parser('''tpu-config''' , description=_description )
else:
        parser = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
lowercase : Dict =parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=__A , default=__A , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=__A , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=__A , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
lowercase : Any =parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=__A , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=__A )
return parser
def lowercase_ ( __A : Dict ) -> List[str]:
"""simple docstring"""
lowercase : List[str] =None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(__A ):
lowercase : List[Any] =load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
lowercase : Any =defaults.command_file
if not args.command and defaults.commands is not None:
lowercase : List[Any] =defaults.commands
if not args.tpu_name:
lowercase : Dict =defaults.tpu_name
if not args.tpu_zone:
lowercase : Dict =defaults.tpu_zone
if args.accelerate_version == "dev":
lowercase : str ='''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
lowercase : int ='''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) , __A ):
lowercase : Any =F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
lowercase : List[str] =[f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , __A ):
lowercase : Union[str, Any] =[line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
lowercase : Tuple =['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
lowercase : Dict ='''; '''.join(__A )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
lowercase : int =['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'Running {" ".join(__A )}' )
return
subprocess.run(__A )
print('''Successfully setup pod.''' )
def lowercase_ ( ) -> Any:
"""simple docstring"""
lowercase : str =tpu_command_parser()
lowercase : Any =parser.parse_args()
tpu_command_launcher(__A )
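# Example invocation (illustrative; the TPU name and zone are placeholders):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "echo hello" --install_accelerate --debug
# With --debug, the assembled `gcloud compute tpus tpu-vm ssh ...` command line
# is printed instead of executed, which is useful for checking it first.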
| 703
|
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase_ ( TestCasePlus ):
    """simple docstring"""

    @require_torch
    def test_offline_mode(self):
        '''simple docstring'''
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """
        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        """
        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
        """
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        '''simple docstring'''
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """
        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        """
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
        """
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        '''simple docstring'''
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
        """
        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
        """
        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
        """
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        '''simple docstring'''
        load = """
from transformers import pipeline
        """
        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
        """
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
        """
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        '''simple docstring'''
        load = """
from transformers import AutoModel
        """
        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
        """
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
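# Note (illustrative, not part of the test file): outside the test suite the
# same behaviour is controlled by the environment variable exercised above, e.g.
#   TRANSFORMERS_OFFLINE=1 python my_script.py
# forces every from_pretrained() call to resolve from the local cache.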
| 8
| 0
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'T5Config'
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """simple docstring"""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
class FlaxMTaModel( FlaxTaModel ):
    """simple docstring"""
    model_type = 'mt5'
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration( FlaxTaForConditionalGeneration ):
    """simple docstring"""
    model_type = 'mt5'
    config_class = MTaConfig
class FlaxMTaEncoderModel( FlaxTaEncoderModel ):
    """simple docstring"""
    model_type = 'mt5'
    config_class = MTaConfig
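# Illustrative example (not in the original module): decoder inputs are the
# labels shifted right, with label padding (-100) replaced by pad_token_id.
#   ids = jnp.array([[5, 6, -100]])
#   shift_tokens_right(ids, pad_token_id=0, decoder_start_token_id=2)
#   # -> [[2, 5, 6]]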
| 98
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class A(AbstractDatasetInputStream):
    def __init__(self, generator, features=None, cache_dir=None, keep_in_memory=False, streaming=False, gen_kwargs=None, num_proc=None, **kwargs):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train')
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
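# Illustrative usage (the public entry point wrapping this reader in `datasets`
# is Dataset.from_generator):
#   from datasets import Dataset
#   def gen():
#       for i in range(3):
#           yield {"id": i}
#   ds = Dataset.from_generator(gen)  # -> Dataset with 3 rows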
| 230
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class lowerCAmelCase_ ( BenchmarkArguments ):
    '''simple docstring'''

    deprecated_args = [
        'no_inference',
        'no_cuda',
        'no_tpu',
        'no_speed',
        'no_memory',
        'no_env_print',
        'no_multi_process',
    ]

    def __init__(self, **kwargs):
        '''simple docstring'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'
                    f' {positive_arg}={kwargs[positive_arg]}')
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fpaa_opt_level = kwargs.pop("fp16_opt_level", self.fpaa_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={'help': 'Trace the models using torchscript'})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={'help': 'Print Xla/PyTorch tpu metrics'})
    fpaa_opt_level: str = field(
        default='O1',
        metadata={
            'help': (
                'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
                'See details at https://nvidia.github.io/apex/amp.html'
            )
        },
    )

    @cached_property
    def _setup_devices(self):
        '''simple docstring'''
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        '''simple docstring'''
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self):
        '''simple docstring'''
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self):
        '''simple docstring'''
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        '''simple docstring'''
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        '''simple docstring'''
        return self.n_gpu > 0
| 720
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_xglm"""] = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_xglm_fast"""] = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xglm"""] = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_xglm"""] = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_xglm"""] = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
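# Note (illustrative): with the _LazyModule pattern above, submodules are only
# imported on first attribute access, e.g.
#   from transformers.models.xglm import XGLMConfig   # cheap, config only
#   from transformers.models.xglm import XGLMModel    # triggers the torch-backed import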
| 412
| 0
|
from math import pi
def arc_length(angle: int, radius: int) -> float:
    '''simple docstring'''
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
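# Quick sanity check (illustrative): a 90-degree arc of a radius-10 circle is a
# quarter of the circumference, 2 * pi * 10 / 4 = 5 * pi ≈ 15.708.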
| 36
|
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_7_0)
new_layer_name_dict = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
REMOTE_MODEL_PATHS = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")
    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss , 3 )} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig)
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config)
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
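# Example invocation (illustrative; the script name and output path are
# placeholders for however this file is saved locally):
#   python convert_suno_to_hf.py text /tmp/bark-text --is_small
# converts the small "text" (semantic) checkpoint and writes the HF model to
# /tmp/bark-text after checking its outputs against the original Bark model.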
| 666
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _snake_case ( PretrainedConfig ):
    model_type = '''vit_msn'''

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 707
|
def lowercase_ ( txt: str ) -> list:
    '''simple docstring'''
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
]
if __name__ == "__main__":
__import__("doctest").testmod()
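# Illustrative example (not part of the original file): one variant is produced
# per alphabetic position, with that position upper-cased.
#   lowercase_("ab1c")  # -> ['Ab1c', 'aB1c', 'ab1C']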
| 57
| 0
|
'''simple docstring'''
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
    env_name = 'hopper-medium-v2'
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        'bglick13/hopper-medium-v2-value-function-hor32',
        env=env,
    )
    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1_000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
            # update return
            total_reward += reward
            total_score += score
            print(
                F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
                F''' {total_score}'''
            )
            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
    except KeyboardInterrupt:
        pass
    print(F'''Total reward: {total_reward}''')
| 245
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json'
),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/realm-cc-news-pretrained-embedder': 512,
'google/realm-cc-news-pretrained-encoder': 512,
'google/realm-cc-news-pretrained-scorer': 512,
'google/realm-cc-news-pretrained-openqa': 512,
'google/realm-orqa-nq-openqa': 512,
'google/realm-orqa-nq-reader': 512,
'google/realm-orqa-wq-openqa': 512,
'google/realm-orqa-wq-reader': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class RealmTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        """simple docstring"""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        """simple docstring"""
        # Always use a fixed sequence length so candidates can be stacked into a batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
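# Illustrative usage (checkpoint name taken from the map above; shapes assume
# one example with two candidates padded to max_length=10):
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch = tokenizer.batch_encode_candidates(
#       [["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="pt"
#   )
#   batch["input_ids"].shape  # -> torch.Size([1, 2, 10])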
| 245
| 1
|
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + """$""") for x in qs))
    # Windows of len(qs) in ks, starting from the left.
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val
    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("""mp""", None)),
        (("transformer", "wte", "embedding"), P("""mp""", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, """mp""")),
        (("attention", "out_proj", "kernel"), P("""mp""", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, """mp""")),
        (("mlp", "c_fc", "bias"), P("""mp""")),
        (("mlp", "c_proj", "kernel"), P("""mp""", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
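# Illustrative example (not in the original file): _match slides the anchored
# regex tuple over windows of a flattened parameter key.
#   _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))  # True
#   _match(("attention", "out_proj", "bias"), ("transformer", "ln_f", "bias"))             # False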
| 631
|
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''  # reduce TensorFlow console verbosity
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
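# Usage (illustrative): run the script directly to dump the environment, e.g.
#   python print_env.py
# CI jobs typically log this output before running the test suite.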
| 631
| 1
|