"""Testing suite for the PyTorch YOLOS model."""

import inspect
import unittest

from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import YolosForObjectDetection, YolosModel
    from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as YOLOS does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for head model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
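# An added usage sketch (not part of the test file above): the same checkpoint
# exercised by the integration test can be driven through the high-level
# object-detection pipeline.
from transformers import pipeline

detector = pipeline(task="object-detection", model="hustvl/yolos-small")
predictions = detector("./tests/fixtures/tests_samples/COCO/000000039769.png")
print(predictions)  # list of {"score", "label", "box"} dicts, thresholded at 0.9 by default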
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES


mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=_snake_case )
class __lowerCAmelCase ( _snake_case ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
__UpperCAmelCase : Optional[Any] = Features({'audio': Audio()} )
__UpperCAmelCase : int = Features({'labels': ClassLabel} )
__UpperCAmelCase : Tuple = 'audio'
__UpperCAmelCase : Dict = 'labels'
def __UpperCAmelCase ( self , _a ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , UpperCamelCase__ ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
__a = copy.deepcopy(self )
__a = self.label_schema.copy()
__a = features[self.label_column]
__a = label_schema
return task_template
@property
def __UpperCAmelCase ( self ):
return {
self.audio_column: "audio",
self.label_column: "labels",
}
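# An added usage sketch (not part of the module above): aligning the template
# with a dataset's features swaps the placeholder ClassLabel for the real one.
from datasets import Audio, ClassLabel, Features

features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
template = AudioClassification(audio_column="audio", label_column="labels")
aligned = template.align_with_features(features)
print(aligned.label_schema["labels"].names)  # ["cat", "dog"]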
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
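# An added usage sketch (not part of the module above): instantiating the
# config directly yields the markuplm-base defaults.
from transformers import MarkupLMConfig, MarkupLMModel

config = MarkupLMConfig()  # defaults mirror microsoft/markuplm-base
model = MarkupLMModel(config)  # randomly initialized; use from_pretrained for trained weights
print(config.max_depth, config.xpath_unit_hidden_size)  # 50 32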
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be
        # tested with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
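# An added usage sketch (not part of the test file above): TextIteratorStreamer
# is the typical way to stream generated text token-by-token from a background
# thread, e.g. into a UI loop. The tiny test checkpoint from above is reused.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
lm = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
stream = TextIteratorStreamer(tok)
inputs = tok(["Streaming demo:"], return_tensors="pt")
Thread(target=lm.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": stream}).start()
for piece in stream:  # blocks until the generating thread pushes the next chunk
    print(piece, end="", flush=True)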
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """
    Prepare the plaintext by up-casing it and separating repeated letters with X's.
    """
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
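# An added round-trip check of the helpers above (a usage sketch, not part of
# the original module): J folds into I and X pads odd lengths, so decode()
# returns the *prepared* form of the plaintext.
if __name__ == "__main__":
    KEY = "monarchy"
    ciphertext = encode("Hide the gold", KEY)
    assert decode(ciphertext, KEY) == "HIDETHEGOLDX"
    print(ciphertext)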
def fizz_buzz(number: int, iterations: int) -> str:
    """
    Plays FizzBuzz.
    Prints Fizz if number is a multiple of 3.
    Prints Buzz if number is a multiple of 5.
    Prints FizzBuzz if number is a multiple of both 3 and 5.
    Else prints the number itself.
    >>> fizz_buzz(1, 7)
    '1 2 Fizz 4 Buzz Fizz 7 '
    """
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class UpperCAmelCase_ ( A_ ):
def __init__( self : str , snake_case_ : List[Any]=None , **snake_case_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead." , snake_case_ , )
super().__init__(args=snake_case_ , **snake_case_ )
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
SCREAMING_SNAKE_CASE = (720, 1280) # Height, Width
SCREAMING_SNAKE_CASE = (0.4, 0.6) # if height or width lower than this scale, drop it.
SCREAMING_SNAKE_CASE = 1 / 100
SCREAMING_SNAKE_CASE = ""
SCREAMING_SNAKE_CASE = ""
SCREAMING_SNAKE_CASE = ""
SCREAMING_SNAKE_CASE = 250
def _SCREAMING_SNAKE_CASE ( ) -> None:
A__, A__ = get_dataset(lowercase_ , lowercase_ )
for index in range(lowercase_ ):
A__ = random.sample(range(len(lowercase_ ) ) , 4 )
A__, A__, A__ = update_image_and_anno(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , filter_scale=lowercase_ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
A__ = random_chars(32 )
A__ = path.split(os.sep )[-1].rsplit("." , 1 )[0]
A__ = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(f"""{file_root}.jpg""" , lowercase_ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
A__ = []
for anno in new_annos:
A__ = anno[3] - anno[1]
A__ = anno[4] - anno[2]
A__ = anno[1] + width / 2
A__ = anno[2] + height / 2
A__ = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(lowercase_ )
with open(f"""{file_root}.txt""" , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> tuple[list, list]:
A__ = []
A__ = []
for label_file in glob.glob(os.path.join(lowercase_ , "*.txt" ) ):
A__ = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(lowercase_ ) as in_file:
A__ = in_file.readlines()
A__ = os.path.join(lowercase_ , f"""{label_name}.jpg""" )
A__ = []
for obj_list in obj_lists:
A__ = obj_list.rstrip("\n" ).split(" " )
A__ = float(obj[1] ) - float(obj[3] ) / 2
A__ = float(obj[2] ) - float(obj[4] ) / 2
A__ = float(obj[1] ) + float(obj[3] ) / 2
A__ = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(lowercase_ )
labels.append(lowercase_ )
return img_paths, labels
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 0.0 , ) -> tuple[list, list, str]:
A__ = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
A__ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
A__ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
A__ = int(scale_x * output_size[1] )
A__ = int(scale_y * output_size[0] )
A__ = []
A__ = []
for i, index in enumerate(lowercase_ ):
A__ = all_img_list[index]
path_list.append(lowercase_ )
A__ = all_annos[index]
A__ = cva.imread(lowercase_ )
if i == 0: # top-left
A__ = cva.resize(lowercase_ , (divid_point_x, divid_point_y) )
A__ = img
for bbox in img_annos:
A__ = bbox[1] * scale_x
A__ = bbox[2] * scale_y
A__ = bbox[3] * scale_x
A__ = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
A__ = cva.resize(lowercase_ , (output_size[1] - divid_point_x, divid_point_y) )
A__ = img
for bbox in img_annos:
A__ = scale_x + bbox[1] * (1 - scale_x)
A__ = bbox[2] * scale_y
A__ = scale_x + bbox[3] * (1 - scale_x)
A__ = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
A__ = cva.resize(lowercase_ , (divid_point_x, output_size[0] - divid_point_y) )
A__ = img
for bbox in img_annos:
A__ = bbox[1] * scale_x
A__ = scale_y + bbox[2] * (1 - scale_y)
A__ = bbox[3] * scale_x
A__ = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
A__ = cva.resize(
lowercase_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
A__ = img
for bbox in img_annos:
A__ = scale_x + bbox[1] * (1 - scale_x)
A__ = scale_y + bbox[2] * (1 - scale_y)
A__ = scale_x + bbox[3] * (1 - scale_x)
A__ = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
A__ = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
assert number_char > 1, "The number of character should greater than 1"
A__ = ascii_lowercase + digits
return "".join(random.choice(lowercase_ ) for _ in range(lowercase_ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
import argparse
import os

import torch

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)


TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}


def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")


def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint


def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint


def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
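# Example invocation (an assumed sketch; the checkpoint file name is what the
# script inspects to pick the U-Net and scheduler configs):
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt \
#       --dump_path ./cd_imagenet64_l2 \
#       --class_cond True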
def get_set_bits_count(number: int) -> int:
    """
    Count the number of set bits in an integer.
    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(37)
    3
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
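# An added cross-check (not part of the original module): Brian Kernighan's
# trick above agrees with a string-based popcount for arbitrary inputs.
if __name__ == "__main__":
    for n in (0, 1, 25, 37, 2**31 - 1):
        assert get_set_bits_count(n) == bin(n).count("1")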
import argparse
import re

import requests
import torch

# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode

from transformers import (
    BertTokenizer,
    BlipConfig,
    BlipForConditionalGeneration,
    BlipForImageTextRetrieval,
    BlipForQuestionAnswering,
)


def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image


def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key


@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_a : Tuple= logging.get_logger(__name__)
_a : str= {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
_a : Optional[int]= {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
_a : Tuple= {"facebook/blenderbot-3B": 128}
class UpperCamelCase ( lowercase ):
UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
UpperCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : List[Any] = ["""input_ids""", """attention_mask"""]
UpperCAmelCase : Optional[int] = BlenderbotTokenizer
    def __init__(self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> None:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space' , add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # Keep the backend post_processor in sync with the requested options as well
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])

            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors , state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer , tokenizer_component , new_value)
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token (self : Optional[int]) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
    @mask_token.setter
    def mask_token (self , value) -> None:
        value = AddedToken(value , lstrip=True , rstrip=False) if isinstance(value , str) else value
        self._mask_token = value

    def _batch_encode_plus (self , *args , **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs)

    def _encode_plus (self , *args , **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs)

    def save_vocabulary (self , save_directory: str , filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences (self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens (self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None) -> List[int]:
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids (self , conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = ' '.join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 172 | 0 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, 'words.txt')

    words = ''
    with open(words_file_path) as f:
        words = f.readline()

    # Strip the surrounding quotes and split the comma-separated word list
    words = [word.strip('"') for word in words.strip('\r\n').split(',')]
    # A word is a "triangle word" if its letter-value sum (A=1 ... Z=26) is triangular
    word_values = [sum(ord(x) - 64 for x in word) for word in words]
    triangle_words = [value for value in word_values if value in TRIANGULAR_NUMBERS]
    return len(triangle_words)
if __name__ == "__main__":
print(solution())
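
# Quick illustrative check of the letter-value computation above: "SKY"
# scores 19 + 11 + 25 = 55, the 10th triangular number, so it counts as a
# triangle word.
assert sum(ord(x) - 64 for x in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS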
| 353 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester :
    def __init__( self ,parent ,batch_size=2 ,image_size=32 ,patch_size=16 ,num_channels=3 ,is_training=True ,use_labels=True ,hidden_size=32 ,num_hidden_layers=4 ,backbone_out_indices=[0, 1, 2, 3] ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,initializer_range=0.02 ,num_labels=3 ,backbone_featmap_shape=[1, 384, 24, 24] ,is_hybrid=True ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        backbone_config = {
            'global_padding': 'same',
            'layer_type': 'bottleneck',
            'depths': [3, 4, 9],
            'out_features': ['stage1', 'stage2', 'stage3'],
            'embedding_dynamic_padding': True,
            'hidden_sizes': [96, 192, 384, 768],
            'num_groups': 2,
        }
        return DPTConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,backbone_out_indices=self.backbone_out_indices ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=False ,initializer_range=self.initializer_range ,is_hybrid=self.is_hybrid ,backbone_config=backbone_config ,backbone_featmap_shape=self.backbone_featmap_shape ,)
    def create_and_check_model( self ,config ,pixel_values ,labels ):
        model = DPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_depth_estimation( self ,config ,pixel_values ,labels ):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.predicted_depth.shape ,(self.batch_size, self.image_size, self.image_size) )
    def create_and_check_for_semantic_segmentation( self ,config ,pixel_values ,labels ):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values ,labels=labels )
        self.parent.assertEqual(
            result.logits.shape ,(self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'depth-estimation': DPTForDepthEstimation,
            'feature-extraction': DPTModel,
            'image-segmentation': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=DPTConfig ,has_text_modality=False ,hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='DPT does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass

    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x ,nn.Linear ) )

    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_depth_estimation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs )

    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    def test_training( self ):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict ,model_class ,return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()

    def test_training_gradient_checkpointing( self ):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict ,model_class ,return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()

    def test_initialization( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [F"""{name}.{key}""" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small( self ):
        pass

    @slow
    def test_model_from_pretrained( self ):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    def test_raise_readout_type( self ):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = 'add'
        with self.assertRaises(ValueError ):
            model = DPTForDepthEstimation(config )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest ( unittest.TestCase ):
    def test_inference_depth_estimation( self ):
        image_processor = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' )
        model = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
            predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 ,expected_slice ,atol=1E-4 ) )
| 107 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor( ChineseCLIPImageProcessor ):
    '''simple docstring'''

    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use ChineseCLIPImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
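
# Minimal usage sketch of the deprecation shim above: constructing the old
# class still works but emits a FutureWarning pointing at the replacement.
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        ChineseCLIPFeatureExtractor()  # behaves exactly like ChineseCLIPImageProcessor
    assert any("deprecated" in str(w.message) for w in caught)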
| 229 | '''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    '''simple docstring'''
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """encoder.embed_positions._float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ):
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk( checkpoint_path ):
    '''simple docstring'''
    mam_aaa = torch.load(checkpoint_path , map_location="""cpu""" )
    args = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
    state_dict = mam_aaa["""model"""]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["""encoder.embed_tokens.weight"""].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )

    state_dict["""shared.weight"""] = state_dict["""decoder.embed_tokens.weight"""]
    model = MaMaaaForConditionalGeneration(config )
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
_A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_A : str = parser.parse_args()
_A : Optional[int] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
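
# Minimal programmatic sketch of using the converter above (the paths are
# illustrative placeholders, not real files):
#
#     model = convert_fairseq_mamaaa_checkpoint_from_disk("/path/to/fairseq/model.pt")
#     model.save_pretrained("/path/to/m2m100-converted")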
| 229 | 1 |
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest (SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config( self ,**kwargs ):
        config = {
            """num_train_timesteps""": 11_00,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """noise_sampler_seed""": 0,
        }

        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas( self ):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] ,[0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start ,beta_end=beta_end )

    def test_schedules( self ):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        scheduler.set_timesteps(self.num_inference_steps )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )

        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample ,t )
            residual = model(sample ,t )
            output = scheduler.step(residual ,t ,sample )
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1E-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1E-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
    def test_full_loop_with_v_prediction( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction" )
        scheduler = scheduler_class(**scheduler_config )

        scheduler.set_timesteps(self.num_inference_steps )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )

        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample ,t )
            residual = model(sample ,t )
            output = scheduler.step(residual ,t ,sample )
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1E-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1E-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3
else:
assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25 ) < 1E-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3
    def test_full_loop_device( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        scheduler.set_timesteps(self.num_inference_steps ,device=torch_device )

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample ,t )
            residual = model(sample ,t )
            output = scheduler.step(residual ,t ,sample )
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1E-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1E-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
    def test_full_loop_device_karras_sigmas( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config ,use_karras_sigmas=True )

        scheduler.set_timesteps(self.num_inference_steps ,device=torch_device )

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        sample = sample.to(torch_device )

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample ,t )
            residual = model(sample ,t )
            output = scheduler.step(residual ,t ,sample )
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
else:
assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
| 365 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["torch", "torchsde"] )

    @classmethod
    def from_config( cls ,*args ,**kwargs ):
        requires_backends(cls ,["torch", "torchsde"] )

    @classmethod
    def from_pretrained( cls ,*args ,**kwargs ):
        requires_backends(cls ,["torch", "torchsde"] )
| 67 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple('''CoinsDistribResult''', '''moves excess''')


def distribute_coins(root: TreeNode | None) -> int:
    '''simple docstring'''
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(root ) != count_coins(root ):
        raise ValueError("The nodes number should be same as the number of coins" )

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )

        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves , result_excess )

    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
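
# Quick usage sketch of distribute_coins above: a root holding all three
# coins must pass one coin to each empty child, i.e. exactly two moves.
tree = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(tree) == 2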
| 22 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence( x : str ) -> str:
    '''simple docstring'''
    x = re.sub("<n>" , "" , x )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
| 22 | 1 |
'''simple docstring'''
class PrefixSum:
    """simple docstring"""

    def __init__( self , array ):
        '''simple docstring'''
        len_array = len(array )
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum( self , start , end ):
        '''simple docstring'''
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum( self , target_sum ):
        '''simple docstring'''
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
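
# Minimal usage sketch of the PrefixSum class above:
ps = PrefixSum([1, 2, 3, 4])
assert ps.get_sum(1, 3) == 9  # 2 + 3 + 4
assert ps.contains_sum(6)  # the subarray [1, 2, 3] sums to 6
assert not ps.contains_sum(100)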
| 364 |
'''simple docstring'''
def solution(limit = 1_0_0_0_0_0_0 ):
    """simple docstring"""
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
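
# Small sanity check of the sieve-based totient sum above: for limit = 8 it
# returns phi(2) + ... + phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21, matching the
# worked example in Project Euler problem 72.
assert solution(limit=8) == 21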
| 25 | 0 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = '''0.12'''  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _lowerCAmelCase ( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def _lowerCAmelCase ( cls ):
try:
delete_repo(token=cls._token, repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
def _lowerCAmelCase ( self ):
A : Optional[int] = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
A : List[Any] = FlaxBertModel(_A )
model.push_to_hub("""test-model-flax""", use_auth_token=self._token )
A : List[Any] = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
A : str = flatten_dict(unfreeze(model.params ) )
A : List[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A, 1e-3, msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token, repo_id="""test-model-flax""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A, repo_id="""test-model-flax""", push_to_hub=_A, use_auth_token=self._token )
A : Any = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
A : str = flatten_dict(unfreeze(model.params ) )
A : List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A : str = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A, 1e-3, msg=f'''{key} not identical''' )
def _lowerCAmelCase ( self ):
A : List[Any] = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
A : List[Any] = FlaxBertModel(_A )
model.push_to_hub("""valid_org/test-model-flax-org""", use_auth_token=self._token )
A : Tuple = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
A : str = flatten_dict(unfreeze(model.params ) )
A : List[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A : Optional[Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A, 1e-3, msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token, repo_id="""valid_org/test-model-flax-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_A, repo_id="""valid_org/test-model-flax-org""", push_to_hub=_A, use_auth_token=self._token )
A : int = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
A : Dict = flatten_dict(unfreeze(model.params ) )
A : List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A : List[Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A, 1e-3, msg=f'''{key} not identical''' )
def check_models_equal( model_1 , model_2 ) -> bool:
    """simple docstring"""
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params )
    flat_params_2 = flatten_dict(model_2.params )
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key] ) ) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class FlaxModelUtilsTest ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Dict = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
A : List[Any] = FlaxBertModel(_A )
A : Optional[int] = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A, _A ) )
with self.assertRaises(_A ):
A : str = FlaxBertModel.from_pretrained(_A )
A : Tuple = FlaxBertModel.from_pretrained(_A, subfolder=_A )
self.assertTrue(check_models_equal(_A, _A ) )
def _lowerCAmelCase ( self ):
A : Optional[int] = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
A : Dict = FlaxBertModel(_A )
A : str = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A, _A ), max_shard_size="""10KB""" )
with self.assertRaises(_A ):
A : int = FlaxBertModel.from_pretrained(_A )
A : str = FlaxBertModel.from_pretrained(_A, subfolder=_A )
self.assertTrue(check_models_equal(_A, _A ) )
def _lowerCAmelCase ( self ):
A : str = "bert"
A : Any = "hf-internal-testing/tiny-random-bert-subfolder"
with self.assertRaises(_A ):
A : Dict = FlaxBertModel.from_pretrained(_A )
A : List[str] = FlaxBertModel.from_pretrained(_A, subfolder=_A )
self.assertIsNotNone(_A )
def _lowerCAmelCase ( self ):
A : Any = "bert"
A : Tuple = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
with self.assertRaises(_A ):
A : Dict = FlaxBertModel.from_pretrained(_A )
A : str = FlaxBertModel.from_pretrained(_A, subfolder=_A )
self.assertIsNotNone(_A )
| 116 | import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester :
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=False ,use_labels=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=64 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,num_choices=4 ,scope=None ,q_groups=2 ,k_groups=2 ,v_groups=2 ,post_attention_groups=2 ,intermediate_groups=4 ,output_groups=1 ,):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size],self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
            choice_labels = ids_tensor([self.batch_size],self.num_choices )

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config( self ):
        """simple docstring"""
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,attention_probs_dropout_prob=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,q_groups=self.q_groups,k_groups=self.k_groups,v_groups=self.v_groups,post_attention_groups=self.post_attention_groups,intermediate_groups=self.intermediate_groups,output_groups=self.output_groups,)
    def create_and_check_squeezebert_model( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = SqueezeBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_squeezebert_for_masked_lm( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = SqueezeBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_squeezebert_for_question_answering( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = SqueezeBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,start_positions=sequence_labels ,end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )

    def create_and_check_squeezebert_for_sequence_classification( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )

    def create_and_check_squeezebert_for_token_classification( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_squeezebert_for_multiple_choice( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids ,attention_mask=multiple_choice_input_mask ,labels=choice_labels ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': SqueezeBertModel,
            'fill-mask': SqueezeBertForMaskedLM,
            'question-answering': SqueezeBertForQuestionAnswering,
            'text-classification': SqueezeBertForSequenceClassification,
            'token-classification': SqueezeBertForTokenClassification,
            'zero-shot': SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = SqueezeBertModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=SqueezeBertConfig ,dim=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_squeezebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest ( unittest.TestCase ):
@slow
    def test_inference_classification_head( self ):
        """simple docstring"""
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
        input_ids = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 3) )
        self.assertEqual(output.shape ,expected_shape )
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]] )
        self.assertTrue(torch.allclose(output ,expected_tensor ,atol=1E-4 ) )
| 18 | 0 |
from __future__ import annotations
def ohms_law( voltage : float , current : float , resistance : float ) -> dict[str, float]:
"""simple docstring"""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
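
# Example usage of the helper above: with two known quantities and the third
# passed as 0, the missing one is solved for and returned in a dict.
assert ohms_law(voltage=10, current=0, resistance=5) == {"current": 2.0}
assert ohms_law(voltage=0, current=2, resistance=3) == {"voltage": 6.0}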
| 120 |
def sum_of_divisors( input_num : int ) -> int:
    """simple docstring"""
    if not isinstance(input_num , int ):
        raise ValueError('Input must be an integer' )
    if input_num <= 0:
        raise ValueError('Input must be positive' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
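
# Quick check of the proper-divisor sum above: the divisors of 12 smaller
# than 12 are 1, 2, 3, 4 and 6, which sum to 16.
assert sum_of_divisors(12) == 16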
| 120 | 1 |
def temp_input_value(min_val: int = 10, max_val: int = 1_000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError('''Invalid value for min_val or max_val (min_value < max_value)''' )
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2 )


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError('''argument value for lower and higher must be(lower > higher)''' )

    if not lower < to_guess < higher:
        raise ValueError(
            '''guess value must be within the range of lower and higher value''' )

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print('''started...''' )

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest )
        last_numbers.append(number )

        if answer(number ) == "low":
            last_lowest = number
        elif answer(number ) == "high":
            last_highest = number
        else:
            break

    print(F"""guess the number : {last_numbers[-1]}""" )
    print(F"""details : {last_numbers!s}""" )


def main() -> None:
    lower = int(input('''Enter lower value : ''' ).strip() )
    higher = int(input('''Enter high value : ''' ).strip() )
    guess = int(input('''Enter value to guess : ''' ).strip() )
    guess_the_number(lower, higher, guess )
if __name__ == "__main__":
main()
| 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
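
# Rough usage sketch of the lazy-module pattern above: the heavy torch
# submodules are only imported when one of their names is first accessed,
# e.g. (assuming the required backends are installed):
#
#     from transformers.models.audio_spectrogram_transformer import ASTConfig
#     config = ASTConfig()  # triggers the lazy import of the config submodule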
| 90 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> str:
snake_case = tempfile.mkdtemp()
snake_case = SamImageProcessor()
snake_case = SamProcessor(__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self, **lowercase_ ) -> Optional[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname, **__lowerCAmelCase ).image_processor
def _lowerCamelCase ( self ) -> Any:
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self ) -> int:
snake_case = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
snake_case = [Image.fromarray(np.moveaxis(__lowerCAmelCase, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCamelCase ( self ) -> Any:
snake_case = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case = self.get_image_processor(do_normalize=__lowerCAmelCase, padding_value=1.0 )
snake_case = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=__lowerCAmelCase, padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, __lowerCAmelCase )
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = self.get_image_processor()
snake_case = SamProcessor(image_processor=__lowerCAmelCase )
snake_case = self.prepare_image_inputs()
snake_case = image_processor(__lowerCAmelCase, return_tensors='np' )
snake_case = processor(images=__lowerCAmelCase, return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 )
@require_torch
def _lowerCamelCase ( self ) -> Tuple:
snake_case = self.get_image_processor()
snake_case = SamProcessor(image_processor=__lowerCAmelCase )
snake_case = [torch.ones((1, 3, 5, 5) )]
snake_case = [[1764, 2646]]
snake_case = [[683, 1024]]
snake_case = processor.post_process_masks(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
snake_case = processor.post_process_masks(
__lowerCAmelCase, torch.tensor(__lowerCAmelCase ), torch.tensor(__lowerCAmelCase ) )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
# should also work with np
snake_case = [np.ones((1, 3, 5, 5) )]
snake_case = processor.post_process_masks(__lowerCAmelCase, np.array(__lowerCAmelCase ), np.array(__lowerCAmelCase ) )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
snake_case = [[1, 0], [0, 1]]
with self.assertRaises(__lowerCAmelCase ):
snake_case = processor.post_process_masks(__lowerCAmelCase, np.array(__lowerCAmelCase ), np.array(__lowerCAmelCase ) )
@require_vision
@require_tf
class TFSamProcessorTest ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Dict:
snake_case = tempfile.mkdtemp()
snake_case = SamImageProcessor()
snake_case = SamProcessor(__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self, **lowercase_ ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname, **__lowerCAmelCase ).image_processor
def _lowerCamelCase ( self ) -> Dict:
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self ) -> str:
snake_case = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
snake_case = [Image.fromarray(np.moveaxis(__lowerCAmelCase, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCamelCase ( self ) -> str:
snake_case = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case = self.get_image_processor(do_normalize=__lowerCAmelCase, padding_value=1.0 )
snake_case = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=__lowerCAmelCase, padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, __lowerCAmelCase )
def _lowerCamelCase ( self ) -> Any:
snake_case = self.get_image_processor()
snake_case = SamProcessor(image_processor=__lowerCAmelCase )
snake_case = self.prepare_image_inputs()
snake_case = image_processor(__lowerCAmelCase, return_tensors='np' )
snake_case = processor(images=__lowerCAmelCase, return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 )
@require_tf
def _lowerCamelCase ( self ) -> List[str]:
snake_case = self.get_image_processor()
snake_case = SamProcessor(image_processor=__lowerCAmelCase )
snake_case = [tf.ones((1, 3, 5, 5) )]
snake_case = [[1764, 2646]]
snake_case = [[683, 1024]]
snake_case = processor.post_process_masks(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, return_tensors='tf' )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
snake_case = processor.post_process_masks(
__lowerCAmelCase, tf.convert_to_tensor(__lowerCAmelCase ), tf.convert_to_tensor(__lowerCAmelCase ), return_tensors='tf', )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
# should also work with np
snake_case = [np.ones((1, 3, 5, 5) )]
snake_case = processor.post_process_masks(
__lowerCAmelCase, np.array(__lowerCAmelCase ), np.array(__lowerCAmelCase ), return_tensors='tf' )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
snake_case = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
snake_case = processor.post_process_masks(
__lowerCAmelCase, np.array(__lowerCAmelCase ), np.array(__lowerCAmelCase ), return_tensors='tf' )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = tempfile.mkdtemp()
snake_case = SamImageProcessor()
snake_case = SamProcessor(__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self, **lowercase_ ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname, **__lowerCAmelCase ).image_processor
def _lowerCamelCase ( self ) -> Dict:
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
snake_case = [Image.fromarray(np.moveaxis(__lowerCAmelCase, 0, -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
    def test_post_process_masks_equivalence( self ) -> Tuple:
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5) ).astype(np.float32 )
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks )]
        pt_dummy_masks = [torch.tensor(dummy_masks )]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors='tf' )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors='pt' )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
    def test_image_processor_equivalence( self ) -> Union[str, Any]:
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        pt_input_feat_extract = image_processor(image_input, return_tensors='pt' )['pixel_values'].numpy()
        pt_input_processor = processor(images=image_input, return_tensors='pt' )['pixel_values'].numpy()
        tf_input_feat_extract = image_processor(image_input, return_tensors='tf' )['pixel_values'].numpy()
        tf_input_processor = processor(images=image_input, return_tensors='tf' )['pixel_values'].numpy()
        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor ) )
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract ) )
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor ) )
| 360 |
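The tests above revolve around SamProcessor.post_process_masks, which rescales low-resolution mask logits back to each image's original resolution. A minimal sketch of that call in ordinary PyTorch use follows; the checkpoint name and tensor sizes are illustrative assumptions, not taken from the tests.

# Sketch: SamProcessor.post_process_masks outside the test harness.
import torch
from transformers import SamProcessor

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")  # illustrative checkpoint
low_res_masks = [torch.randn(1, 3, 256, 256)]  # per-image mask logits from the model
original_sizes = [[1764, 2646]]                # original image size, as in the tests above
reshaped_input_sizes = [[683, 1024]]           # size after the processor's resize step

masks = processor.post_process_masks(low_res_masks, original_sizes, reshaped_input_sizes)
print(masks[0].shape)  # torch.Size([1, 3, 1764, 2646]); masks are back at original resolution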
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations( sequence : list ) -> None:
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree( sequence : list , current_sequence : list , index : int , index_used : list , ) -> None:
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 332 | 0 |
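As a sanity check on the backtracking traversal above, the standard library produces the same six orderings for the three-element input; a quick sketch:

# Sketch: the state-space tree above visits permutations in the same
# index order as itertools.permutations.
from itertools import permutations

for perm in permutations(["A", "B", "C"]):
    print(list(perm))
# ['A', 'B', 'C'], ['A', 'C', 'B'], ['B', 'A', 'C'],
# ['B', 'C', 'A'], ['C', 'A', 'B'], ['C', 'B', 'A']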
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
__a :str = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def tpu_command_parser( subparsers=None ):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config" ,description=_description )
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command" ,description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments" ,"Arguments that can be configured through `accelerate config`." )
    config_args.add_argument(
        "--config_file" ,type=str ,default=None ,help="Path to the config file to use for accelerate." ,)
    config_args.add_argument(
        "--tpu_name" ,default=None ,help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." ,)
    config_args.add_argument(
        "--tpu_zone" ,default=None ,help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." ,)
    pod_args = parser.add_argument_group("TPU Arguments" ,"Arguments for options ran inside the TPU." )
    pod_args.add_argument(
        "--use_alpha" ,action="store_true" ,help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." ,)
    pod_args.add_argument(
        "--command_file" ,default=None ,help="The path to the file containing the commands to run on the pod on startup." ,)
    pod_args.add_argument(
        "--command" ,action="append" ,nargs="+" ,help="A command to run on the pod. Can be passed multiple times." ,)
    pod_args.add_argument(
        "--install_accelerate" ,action="store_true" ,help="Whether to install accelerate on the pod. Defaults to False." ,)
    pod_args.add_argument(
        "--accelerate_version" ,default="latest" ,help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." ,)
    pod_args.add_argument(
        "--debug" ,action="store_true" ,help="If set, will print the command that would be run instead of running it." )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def tpu_command_launcher( args ):
    """simple docstring"""
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version ) ,Version ):
        args.accelerate_version = f'''accelerate=={args.accelerate_version}'''
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod." )
    if args.command_file:
        with open(args.command_file ,"r" ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] ,list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f'''pip install {args.accelerate_version}''']
    new_cmd += args.command
    args.command = "; ".join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f'''Running {" ".join(cmd )}''' )
        return
    subprocess.run(cmd )
    print("Successfully setup pod." )
def main():
    """simple docstring"""
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args ) | 312 |
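For reference, here is roughly what the launcher above hands to subprocess.run in a typical invocation; the TPU name, zone, and command below are made-up example values.

# Illustrative: the gcloud invocation assembled by tpu_command_launcher for
# --tpu_name my-tpu --tpu_zone us-central1-a --command "echo hello"
# --install_accelerate --debug (all values are made up).
cmd = [
    "gcloud", "compute", "tpus", "tpu-vm", "ssh",
    "my-tpu",
    "--zone", "us-central1-a",
    "--command", "cd /usr/share; pip install accelerate -U; echo hello",
    "--worker", "all",
]
print(" ".join(cmd))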
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _a :
"""simple docstring"""
@property
    def dummy_input( self ):
return self.get_dummy_input()
@property
    def output_shape( self ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
    def get_dummy_input( self , include_temb=True , include_res_hidden_states_tuple=False , include_encoder_hidden_states=False , include_skip_sample=False , ):
A_ = 4
A_ = 32
A_ = (32, 32)
A_ = torch.manual_seed(0 )
A_ = torch.device(UpperCAmelCase )
A_ = (batch_size, num_channels) + sizes
A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase )
A_ = {"hidden_states": hidden_states}
if include_temb:
A_ = 128
A_ = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase , device=UpperCAmelCase )
if include_res_hidden_states_tuple:
A_ = torch.manual_seed(1 )
A_ = (randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ),)
if include_encoder_hidden_states:
A_ = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase )
if include_skip_sample:
A_ = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase , device=UpperCAmelCase )
return dummy_input
    def prepare_init_args_and_inputs_for_common( self ):
A_ = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 128,
}
if self.block_type == "up":
A_ = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
A_ = self.dummy_input
return init_dict, inputs_dict
    def test_output( self , expected_slice ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
unet_block.to(UpperCAmelCase )
unet_block.eval()
with torch.no_grad():
A_ = unet_block(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
self.assertEqual(output.shape , self.output_shape )
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        assert torch_all_close(output_slice.flatten() , expected_slice , atol=5E-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def __A ( self : Union[str, Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
A_ = model(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
A_ = torch.device(UpperCAmelCase )
A_ = randn_tensor(output.shape , device=UpperCAmelCase )
A_ = torch.nn.functional.mse_loss(UpperCAmelCase , UpperCAmelCase )
loss.backward() | 312 | 1 |
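The mixin above is only useful through concrete subclasses that pin down block_class, block_type, and a trusted output slice. A hypothetical subclass in the diffusers test style follows; the block class, mixin name, and slice values are illustrative assumptions, not taken from the source.

# Hypothetical concrete test built on the mixin above (names and the
# expected slice are illustrative).
import unittest
from diffusers.models.unet_2d_blocks import DownBlock2D

class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D
    block_type = "down"  # selects the (4, 32, 16, 16) expected output shape

    def test_output(self):
        # slice values would come from a trusted reference run
        expected_slice = [-0.02, 0.03, 0.04, 0.01, -0.01, 0.02, 0.00, -0.03, 0.05]
        super().test_output(expected_slice)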
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
    def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
A__ = size if size is not None else {'''shortest_edge''': 18}
A__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = image_size
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_center_crop
A__ = crop_size
A__ = do_normalize
A__ = image_mean
A__ = image_std
    def prepare_image_processor_dict( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = LevitImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = LevitImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing,'''image_mean''' ) )
        self.assertTrue(hasattr(image_processing,'''image_std''' ) )
        self.assertTrue(hasattr(image_processing,'''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing,'''do_resize''' ) )
        self.assertTrue(hasattr(image_processing,'''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing,'''size''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size,{'''shortest_edge''': 18} )
        self.assertEqual(image_processor.crop_size,{'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict,size=42,crop_size=84 )
        self.assertEqual(image_processor.size,{'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size,{'''height''': 84, '''width''': 84} )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
# Initialize image_processing
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester,equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase,Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0],return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
),)
# Test batched
A__ = image_processing(__lowerCamelCase,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
),)
def UpperCamelCase ( self ):
# Initialize image_processing
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester,equal_resolution=__lowerCamelCase,numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase,np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0],return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
),)
# Test batched
A__ = image_processing(__lowerCamelCase,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
),)
def UpperCamelCase ( self ):
# Initialize image_processing
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester,equal_resolution=__lowerCamelCase,torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase,torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0],return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
),)
# Test batched
A__ = image_processing(__lowerCamelCase,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
),)
| 362 |
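Outside the test harness, the same processor is driven like this; a minimal sketch with a synthetic image, using configuration values that mirror the tester defaults above:

# Sketch: LevitImageProcessor end to end on one synthetic image.
import numpy as np
from PIL import Image
from transformers import LevitImageProcessor

image_processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))

pixel_values = image_processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18]) after resize + center crop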
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool( PipelineTool ):
__SCREAMING_SNAKE_CASE = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
__SCREAMING_SNAKE_CASE = '''CIDAS/clipseg-rd64-refined'''
__SCREAMING_SNAKE_CASE = '''image_segmenter'''
__SCREAMING_SNAKE_CASE = CLIPSegForImageSegmentation
__SCREAMING_SNAKE_CASE = ['''image''', '''text''']
__SCREAMING_SNAKE_CASE = ['''image''']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''vision'''] )
        super().__init__(*args , **kwargs )
    def encode( self , image , label ):
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors='''pt''' )
    def forward( self , inputs ):
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits
    def decode( self , outputs ):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
| 39 | 0 |
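In normal use the tool is called like any other PipelineTool, with __call__ chaining encode, forward, and decode. A sketch follows, assuming this class ships as ImageSegmentationTool in the transformers agents tooling; the import path and the input file are assumptions, not taken from the source.

# Sketch only: end-to-end use of the segmentation tool (import path and
# image file are assumed for illustration).
from PIL import Image
from transformers.tools import ImageSegmentationTool

tool = ImageSegmentationTool()
image = Image.open("cat.png")
mask = tool(image, "cat")   # PipelineTool.__call__ runs encode -> forward -> decode
mask.save("cat_mask.png")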
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : List[str] = 'speech_to_text_2'
UpperCAmelCase__ : Dict = ['past_key_values']
UpperCAmelCase__ : Tuple = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=10_000 , decoder_layers=6 , decoder_ffn_dim=2_048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1_024 , **kwargs , ):
__lowerCamelCase = vocab_size
__lowerCamelCase = d_model
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = use_cache
__lowerCamelCase = decoder_layers
__lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCamelCase = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 12 |
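Instantiating the config is enough to see the attribute aliasing at work; a quick sketch using the released Speech2Text2Config class that this definition corresponds to:

# Sketch: the decoder-only config above, via the released class name.
from transformers import Speech2Text2Config

config = Speech2Text2Config(vocab_size=10_000, d_model=256, decoder_layers=6)
print(config.model_type)   # "speech_to_text_2"
print(config.hidden_size)  # 256; the attribute map aliases hidden_size to d_model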
snake_case : str = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
snake_case : List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
snake_case : int = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 94 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a_ ( lowerCamelCase__ ):
lowercase = ["""image_processor""", """tokenizer"""]
lowercase = """LayoutLMv2ImageProcessor"""
lowercase = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> Union[str, Any]:
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __snake_case , )
UpperCamelCase = kwargs.pop("""feature_extractor""" )
UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__snake_case , __snake_case )
    def __call__( self , images , text=None , text_pair=None , boxes=None , word_labels=None , add_special_tokens=True , padding=False , truncation=None , max_length=None , stride=0 , pad_to_multiple_of=None , return_token_type_ids=None , return_attention_mask=None , return_overflowing_tokens=False , return_special_tokens_mask=False , return_offsets_mapping=False , return_length=False , verbose=True , return_tensors=None , **kwargs , ) -> Optional[int]:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes """
"""if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("""You cannot return overflowing tokens without returning the offsets mapping.""" )
# first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['''words''']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
# add pixel values
        images = features.pop("""pixel_values""" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["""overflow_to_sample_mapping"""] )
        encoded_inputs["""image"""] = images
return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ) -> Tuple:
        """simple docstring"""
        # in case of an overflow, map each `input_ids` sample back to its source image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
                F" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}" )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ) -> List[Any]:
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ) -> Dict:
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@property
def A__ ( self ) -> Any:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def A__ ( self ) -> Dict:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __snake_case , )
return self.image_processor_class
@property
def A__ ( self ) -> Tuple:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __snake_case , )
return self.image_processor
| 358 |
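A typical OCR-free call into this processor pairs user-supplied words and normalized boxes with an image; a minimal sketch follows (the tokenizer checkpoint is illustrative):

# Sketch: calling the LayoutXLM processor with pre-extracted words/boxes.
from PIL import Image
from transformers import LayoutLMv2ImageProcessor, LayoutXLMProcessor, LayoutXLMTokenizer

image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
tokenizer = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base")  # illustrative
processor = LayoutXLMProcessor(image_processor=image_processor, tokenizer=tokenizer)

image = Image.new("RGB", (224, 224))
words = ["hello", "world"]
boxes = [[10, 10, 50, 20], [60, 10, 110, 20]]  # 0-1000 normalized coordinates

encoding = processor(image, words, boxes=boxes, return_tensors="pt")
print(encoding["input_ids"].shape, encoding["bbox"].shape, encoding["image"].shape)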
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger('transformers.models.speecht5')
def load_weights( checkpoint , hf_model , config ):
hf_model.apply_weight_norm()
UpperCamelCase = checkpoint["""input_conv.weight_g"""]
UpperCamelCase = checkpoint["""input_conv.weight_v"""]
UpperCamelCase = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCamelCase = checkpoint[F"upsamples.{i}.1.weight_g"]
UpperCamelCase = checkpoint[F"upsamples.{i}.1.weight_v"]
UpperCamelCase = checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCamelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
UpperCamelCase = checkpoint["""output_conv.1.weight_g"""]
UpperCamelCase = checkpoint["""output_conv.1.weight_v"""]
UpperCamelCase = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["""model"""]["""generator"""] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 183 | 0 |
'''simple docstring'''
def speed_of_sound_in_a_fluid( density : float , bulk_modulus : float ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 |
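Plugging in textbook values for water makes the formula c = sqrt(bulk_modulus / density) concrete; the numbers below are standard approximations, not taken from the source.

# Worked example for the speed-of-sound formula above.
density = 998.0        # kg/m^3, water at roughly 20 degrees C (textbook value)
bulk_modulus = 2.15e9  # Pa (textbook value)

c = (bulk_modulus / density) ** 0.5
print(f"{c:.0f} m/s")  # ~1468 m/s, close to the measured speed of sound in water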
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char( cp ):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
def is_chinese( word : str ):
    '''simple docstring'''
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
def get_chinese_word( tokens : List[str] ):
    '''simple docstring'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol( bert_tokens : List[str] , chinese_word_set : set ):
    '''simple docstring'''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start, end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            max_len_here = min(end - start , max_word_len )
            for i in range(max_len_here , 1 , -1 ):
                whole_word = "".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref( lines : List[str] , ltp_tokenizer : LTP , bert_tokenizer : BertTokenizer ):
    '''simple docstring'''
    ltp_res = []
    for i in range(0 , len(lines ) , 1_0_0 ):
        res = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=["cws"] ).cws
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 1_0_0 ):
        res = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=True , truncation=True , max_length=5_1_2 )
        bert_res.extend(res["input_ids"] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main( args ):
    '''simple docstring'''
    with open(args.file_name , "r" , encoding="utf-8" ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , "w" , encoding="utf-8" ) as f:
        data = [json.dumps(ref ) + "\n" for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
main(args)
| 85 | 1 |
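To make the whole-word-masking preparation above concrete, here is what add_sub_symbol does to a small token sequence once LTP has identified a multi-character word; the tokens are an illustrative example, not taken from the source.

# Illustrative: marking BERT subword positions that continue a Chinese word.
tokens = ["[CLS]", "中", "国", "人", "[SEP]"]
print(add_sub_symbol(tokens, {"中国"}))
# ['[CLS]', '中', '##国', '人', '[SEP]'] -- the positions of '##'-prefixed
# single characters are exactly what prepare_ref() records as reference ids.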
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase: str = logging.get_logger(__name__)
def create_rename_keys( config , has_lm_head=False , is_semantic=False ):
    prefix = """backbone.""" if is_semantic else """"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""{prefix}blocks.{i}.norm1.weight""", F"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm1.bias""", F"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.weight""", F"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.bias""", F"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.weight""", F"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.bias""", F"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.weight""", F"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.bias""", F"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.weight""", F"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.bias""", F"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"""{prefix}cls_token""", """beit.embeddings.cls_token"""),
(F"""{prefix}patch_embed.proj.weight""", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"""{prefix}patch_embed.proj.bias""", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"""{prefix}pos_embed""", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , has_lm_head=False , is_semantic=False ):
    for i in range(config.num_hidden_layers ):
        prefix = """backbone.""" if is_semantic else """"""
        # queries, keys and values
        in_proj_weight = state_dict.pop(F"""{prefix}blocks.{i}.attn.qkv.weight""" )
        q_bias = state_dict.pop(F"""{prefix}blocks.{i}.attn.q_bias""" )
        v_bias = state_dict.pop(F"""{prefix}blocks.{i}.attn.v_bias""" )
        state_dict[F"""beit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""beit.encoder.layer.{i}.attention.attention.query.bias"""] = q_bias
        state_dict[F"""beit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""beit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""beit.encoder.layer.{i}.attention.attention.value.bias"""] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(F"""{prefix}blocks.{i}.gamma_1""" )
        gamma_2 = state_dict.pop(F"""{prefix}blocks.{i}.gamma_2""" )
        state_dict[F"""beit.encoder.layer.{i}.lambda_1"""] = gamma_1
        state_dict[F"""beit.encoder.layer.{i}.lambda_2"""] = gamma_2
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dit_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ):
    has_lm_head = False if """rvlcdip""" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True , use_mask_token=has_lm_head )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = """huggingface/label-files"""
        filename = """rvlcdip-id2label.json"""
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )["""model"""]
    rename_keys = create_rename_keys(config , has_lm_head=has_lm_head )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , has_lm_head=has_lm_head )
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config ) if has_lm_head else BeitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image , return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values )
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        if has_lm_head:
            model_name = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
        else:
            model_name = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=True , )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 336 |
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines( lines ):
    filtered_lines = []
    for line in lines:
        line = re.sub(R"""#.*""" , """""" , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = """\n""".join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode("""utf-8""" )
    return sha256(full_bytes ).hexdigest()
# get importable module names and hash for caching
UpperCAmelCase: Tuple = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
UpperCAmelCase: Any = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 336 | 1 |
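Because _hash_python_lines strips comments before hashing, comment-only edits do not invalidate the cached module hash; a quick sketch, run inside the module above:

# Sketch: comment-only changes hash identically, code changes do not.
print(_hash_python_lines(["x = 1  # set x", "y = 2"]))
print(_hash_python_lines(["x = 1  # new comment", "y = 2"]))  # same digest
print(_hash_python_lines(["x = 3", "y = 2"]))                 # different digest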
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path : str , pytorch_dump_folder_path : str ):
if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    special_keys = ['''key_proj''', '''value_proj''', '''query_proj''']
    mapping = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
        attributes = key.split('''.''' )
if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                    old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f'''{attribute} is initialized.''' )
                is_key_init = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f'''{attribute} is initialized''' )
                is_key_init = True
break
elif attribute in special_keys and hasattr(__lowerCAmelCase , '''in_proj_weight''' ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
__lowerCamelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
__lowerCamelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
__lowerCamelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
__lowerCamelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
__lowerCamelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
__lowerCamelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
__lowerCamelCase = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(old_attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(f'''{old_model} does not have {old_attribute}''' )
                    old_model = getattr(old_model , old_attribute )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 270 |
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList( HashTable ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
    def _set_value( self , key , data ):
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]
    def balanced_factor( self ):
        return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )
    def _collision_resolution( self , key , data=None ):
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data )
| 156 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("""T""")
class SegmentTree( Generic[T] ):
    def __init__( self , arr : list[T] , fnc : Callable[[T, T], T] ):
        '''simple docstring'''
        any_type : Any | T = None
        self.N : int = len(arr )
        self.st : list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()
    def build( self ):
        '''simple docstring'''
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def update( self , p : int , v : T ):
        '''simple docstring'''
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def query( self , l : int , r : int ):  # noqa: E741
        '''simple docstring'''
        l, r = l + self.N, r + self.N
        res : T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
min_segment_tree = SegmentTree(test_array, min)
max_segment_tree = SegmentTree(test_array, max)
sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
def test_all_segments() -> None:
    '''simple docstring'''
    for i in range(len(test_array ) ):
        for j in range(i , len(test_array ) ):
            min_range = reduce(min , test_array[i : j + 1] )
            max_range = reduce(max , test_array[i : j + 1] )
            sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
            assert min_range == min_segment_tree.query(i , j )
            assert max_range == max_segment_tree.query(i , j )
            assert sum_range == sum_segment_tree.query(i , j )
test_all_segments()
for index, value in test_updates.items():
    test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 298 |
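The index arithmetic in the class above is easiest to see on a tiny input: with N = 4, the leaves live at st[4..7] and the internal nodes at st[1..3], with st[1] the root. A quick sketch:

# Sketch: layout of the iterative segment tree for a 4-element array.
tree = SegmentTree([5, 2, 8, 3], min)
print(tree.st[4:])       # leaves: [5, 2, 8, 3]
print(tree.st[1])        # root, min over the whole array: 2
print(tree.query(1, 2))  # inclusive range min over [2, 8]: 2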
"""simple docstring"""
import functools
def min_distance_up_bottom( word1 : str , word2 : str )-> int:
    '''simple docstring'''
    len_word1 = len(word1 )
    len_word2 = len(word2 )
    @functools.cache
    def min_distance( index1 : int , index2 : int ) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2] )  # current letters not identical
        return min(
            1 + min_distance(index1 + 1 , index2 ) , 1 + min_distance(index1 , index2 + 1 ) , diff + min_distance(index1 + 1 , index2 + 1 ) , )
    return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298 | 1 |
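A classic worked case for the recursion above: turning "horse" into "ros" takes three operations (replace h with r, delete r, delete e), and the memoized search reproduces that count.

# Worked examples for the top-down edit distance above.
print(min_distance_up_bottom("horse", "ros"))            # 3
print(min_distance_up_bottom("intention", "execution"))  # 5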
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata( class_info_file , repo_path="shi-labs/oneformer_demo" ):
    with open(hf_hub_download(repo_path , class_info_file , repo_type='''dataset''' ) , '''r''' ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info['''name''']
        class_names.append(info['''name'''] )
        if info["isthing"]:
            thing_ids.append(int(key ) )
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , size=None , do_resize=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , num_labels=10 , do_reduce_labels=False , ignore_index=255 , repo_path="shi-labs/oneformer_demo" , class_info_file="ade20k_panoptic.json" , num_text=10 , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = min_resolution
__UpperCAmelCase = max_resolution
__UpperCAmelCase = do_resize
__UpperCAmelCase = {'''shortest_edge''': 32, '''longest_edge''': 13_33} if size is None else size
__UpperCAmelCase = do_normalize
__UpperCAmelCase = image_mean
__UpperCAmelCase = image_std
__UpperCAmelCase = class_info_file
        self.metadata = prepare_metadata(class_info_file , repo_path )
__UpperCAmelCase = num_text
__UpperCAmelCase = repo_path
# for the post_process_functions
__UpperCAmelCase = 2
__UpperCAmelCase = 10
__UpperCAmelCase = 10
__UpperCAmelCase = 3
__UpperCAmelCase = 4
__UpperCAmelCase = num_labels
__UpperCAmelCase = do_reduce_labels
__UpperCAmelCase = ignore_index
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
if not batched:
__UpperCAmelCase = image_inputs[0]
if isinstance(UpperCamelCase__ , Image.Image ):
__UpperCAmelCase , __UpperCAmelCase = image.size
else:
__UpperCAmelCase , __UpperCAmelCase = image.shape[1], image.shape[2]
if w < h:
__UpperCAmelCase = int(self.size['''shortest_edge'''] * h / w )
__UpperCAmelCase = self.size['''shortest_edge''']
elif w > h:
__UpperCAmelCase = self.size['''shortest_edge''']
__UpperCAmelCase = int(self.size['''shortest_edge'''] * w / h )
else:
__UpperCAmelCase = self.size['''shortest_edge''']
__UpperCAmelCase = self.size['''shortest_edge''']
else:
__UpperCAmelCase = []
for image in image_inputs:
__UpperCAmelCase , __UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            __UpperCAmelCase = max(UpperCamelCase__ , key=lambda item : item[0] )[0]
            __UpperCAmelCase = max(UpperCamelCase__ , key=lambda item : item[1] )[1]
return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
a__ : Union[str, Any] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a__ : str = image_processing_class
    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)
    @property
    def image_processor_dict(self):
return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''size''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''ignore_index''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''class_info_file''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''num_text''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''repo_path''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''metadata''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''do_reduce_labels''' ) )
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
__UpperCAmelCase = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
__UpperCAmelCase , __UpperCAmelCase = self.image_processing_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase , __UpperCAmelCase = self.image_processing_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
__UpperCAmelCase = image_processor(
UpperCamelCase__ , ['''semantic'''] * len(UpperCamelCase__ ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
__UpperCAmelCase = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
__UpperCAmelCase , __UpperCAmelCase = self.image_processing_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase , __UpperCAmelCase = self.image_processing_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
__UpperCAmelCase = image_processor(
UpperCamelCase__ , ['''semantic'''] * len(UpperCamelCase__ ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
__UpperCAmelCase = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
__UpperCAmelCase , __UpperCAmelCase = self.image_processing_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase , __UpperCAmelCase = self.image_processing_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
__UpperCAmelCase = image_processor(
UpperCamelCase__ , ['''semantic'''] * len(UpperCamelCase__ ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def comm_get_image_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"):
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
__UpperCAmelCase = self.image_processing_tester.num_labels
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase__ )
if with_segmentation_maps:
            high = num_labels
if is_instance_map:
__UpperCAmelCase = list(range(UpperCamelCase__ ) ) * 2
__UpperCAmelCase = dict(enumerate(UpperCamelCase__ ) )
__UpperCAmelCase = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
__UpperCAmelCase = [Image.fromarray(UpperCamelCase__ ) for annotation in annotations]
__UpperCAmelCase = image_processor(
UpperCamelCase__ , ['''semantic'''] * len(UpperCamelCase__ ) , UpperCamelCase__ , return_tensors='''pt''' , instance_id_to_semantic_id=UpperCamelCase__ , pad_and_return_pixel_mask=UpperCamelCase__ , )
return inputs
    def test_init_without_params(self):
        pass
    def test_call_with_segmentation_maps(self):
def common(_lowercase : List[str]=False , _lowercase : int=None ):
__UpperCAmelCase = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCamelCase__ , is_instance_map=UpperCamelCase__ , segmentation_type=UpperCamelCase__ )
__UpperCAmelCase = inputs['''mask_labels''']
__UpperCAmelCase = inputs['''class_labels''']
__UpperCAmelCase = inputs['''pixel_values''']
__UpperCAmelCase = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase__ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCamelCase__ )
common(is_instance_map=UpperCamelCase__ , segmentation_type='''pil''' )
common(is_instance_map=UpperCamelCase__ , segmentation_type='''pil''' )
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50) )
        fake_binary_mask[0, 20:] = 1  # first row, last 30 pixels
        fake_binary_mask[1, :15] = 1  # second row, first 15 pixels
        fake_binary_mask[5, :10] = 1  # sixth row, first 10 pixels
        rle = binary_mask_to_rle(fake_binary_mask )
        self.assertEqual(len(rle ) , 4 )
        self.assertEqual(rle[0] , 21 )
        self.assertEqual(rle[1] , 45 )
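    # Note on the expected values (our reading of binary_mask_to_rle): the mask is
    # flattened row by row and runs of ones are stored as (1-indexed start, length)
    # pairs, so the 30 ones closing row 0 merge with the 15 ones opening row 1 into a
    # single run of length 45 starting at pixel 21, and the row-5 run contributes the
    # remaining two entries, giving len(rle) == 4.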
    def test_post_process_semantic_segmentation(self):
__UpperCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
__UpperCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
        __UpperCAmelCase = image_processor.post_process_semantic_segmentation(UpperCamelCase__ )
self.assertEqual(len(UpperCamelCase__ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
__UpperCAmelCase = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        __UpperCAmelCase = image_processor.post_process_semantic_segmentation(UpperCamelCase__ , target_sizes=UpperCamelCase__ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
    def test_post_process_instance_segmentation(self):
__UpperCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
__UpperCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__UpperCAmelCase = image_processor.post_process_instance_segmentation(UpperCamelCase__ , threshold=0 )
self.assertTrue(len(UpperCamelCase__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , UpperCamelCase__ )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
    def test_post_process_panoptic_segmentation(self):
__UpperCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
__UpperCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__UpperCAmelCase = image_processor.post_process_panoptic_segmentation(UpperCamelCase__ , threshold=0 )
self.assertTrue(len(UpperCamelCase__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , UpperCamelCase__ )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 332 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
    use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
    use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        filename = '''imagenet-22k-id2label.json'''
    else:
        filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=id2label , label2id=label2id , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
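# Illustrative call: get_focalnet_config("focalnet-tiny-lrf") yields a config with
# embed_dim=96, depths=[2, 2, 6, 2], focal_levels=[3, 3, 3, 3], focal_windows=[3, 3, 3, 3]
# and the 1000 ImageNet-1k labels attached via id2label/label2id.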
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
    if "layers" in name:
        name = '''encoder.''' + name
    if "encoder.layers" in name:
        name = name.replace('''encoder.layers''' , '''encoder.stages''' )
    if "downsample.proj" in name:
        name = name.replace('''downsample.proj''' , '''downsample.projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('''modulation.f''' , '''modulation.projection_in''' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('''modulation.h''' , '''modulation.projection_context''' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''
    if "head" in name:
        name = name.replace('''head''' , '''classifier''' )
    else:
        name = '''focalnet.''' + name
    return name
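# Illustrative mapping: rename_key("layers.0.blocks.1.modulation.f.weight") returns
# "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight".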
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print('''Checkpoint URL: ''' , checkpoint_url )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''model''']
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    config = get_focalnet_config(model_name )
    model = FocalNetForImageClassification(config )
    model.eval()
    # load state dict
    model.load_state_dict(state_dict )
    # verify conversion
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    processor = BitImageProcessor(
        do_resize=True , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=224 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = processor(images=image , return_tensors='''pt''' )
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1E-4 )
    outputs = model(**inputs )
    predicted_class_idx = outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''' , model.config.id2label[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowerCAmelCase_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
lowerCAmelCase_ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
lowerCAmelCase_ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
lowerCAmelCase_ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
lowerCAmelCase_ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
lowerCAmelCase_ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_A )
processor.save_pretrained(_A )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
_A = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 278 | 0 |
from ....utils import logging
_A = logging.get_logger(__name__)
class MMBTConfig:
"""simple docstring"""
    def __init__( self , config , num_labels=None , modal_hidden_size=2048 ) -> None:
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
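# Usage sketch (hypothetical wrapped object): MMBTConfig copies the attributes of an
# existing text config wholesale via config.__dict__ and layers modal_hidden_size
# (and, when given, num_labels) on top of them.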
| 353 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_A = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
"""simple docstring"""
    dataset_name: Optional[str] = field(
        default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    image_column_name: Optional[str] = field(
        default=None , metadata={"help": "The column name of the images in the files."} )
    train_dir: Optional[str] = field(default=None , metadata={"help": "A folder containing the training data."} )
    validation_dir: Optional[str] = field(default=None , metadata={"help": "A folder containing the validation data."} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    def __post_init__(self) -> None:
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
"""simple docstring"""
    model_name_or_path: str = field(
        default=None , metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        } , )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    image_processor_name: str = field(default=None , metadata={"help": "Name or path of preprocessor config."} )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    mask_ratio: float = field(
        default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
    norm_pix_loss: bool = field(
        default=True , metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class CustomTrainingArguments(TrainingArguments):
"""simple docstring"""
    base_learning_rate: float = field(
        default=1e-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def collate_fn(examples):
    pixel_values = torch.stack([example['pixel_values'] for example in examples] )
    return {"pixel_values": pixel_values}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCamelCase =HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mae' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__UpperCamelCase =training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__UpperCamelCase =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCamelCase =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
__UpperCamelCase =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__UpperCamelCase =None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
__UpperCamelCase =ds['train'].train_test_split(data_args.train_val_split )
__UpperCamelCase =split['train']
__UpperCamelCase =split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase ={
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
__UpperCamelCase =ViTMAEConfig.from_pretrained(model_args.config_name , **SCREAMING_SNAKE_CASE__ )
elif model_args.model_name_or_path:
__UpperCamelCase =ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
__UpperCamelCase =ViTImageProcessor.from_pretrained(model_args.image_processor_name , **SCREAMING_SNAKE_CASE__ )
elif model_args.model_name_or_path:
__UpperCamelCase =ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =ViTImageProcessor()
# create model
if model_args.model_name_or_path:
__UpperCamelCase =ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
__UpperCamelCase =ViTMAEForPreTraining(SCREAMING_SNAKE_CASE__ )
if training_args.do_train:
__UpperCamelCase =ds['train'].column_names
else:
__UpperCamelCase =ds['validation'].column_names
if data_args.image_column_name is not None:
__UpperCamelCase =data_args.image_column_name
elif "image" in column_names:
__UpperCamelCase ='image'
elif "img" in column_names:
__UpperCamelCase ='img'
else:
__UpperCamelCase =column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__UpperCamelCase =image_processor.size['shortest_edge']
else:
__UpperCamelCase =(image_processor.size['height'], image_processor.size['width'])
__UpperCamelCase =Compose(
[
            Lambda(lambda img : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(SCREAMING_SNAKE_CASE__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image ) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
__UpperCamelCase =ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(SCREAMING_SNAKE_CASE__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
__UpperCamelCase =(
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(SCREAMING_SNAKE_CASE__ )
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
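        # e.g. (illustrative numbers) a per-device batch of 32 with gradient
        # accumulation 4 on 2 processes gives total_train_batch_size = 256, so a
        # base_learning_rate of 1.5e-4 maps to an absolute rate of 1.5e-4 * 256 / 256.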
# Initialize our trainer
__UpperCamelCase =Trainer(
model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE__ , data_collator=SCREAMING_SNAKE_CASE__ , )
# Training
if training_args.do_train:
__UpperCamelCase =None
if training_args.resume_from_checkpoint is not None:
__UpperCamelCase =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__UpperCamelCase =last_checkpoint
__UpperCamelCase =trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__UpperCamelCase =trainer.evaluate()
trainer.log_metrics('eval' , SCREAMING_SNAKE_CASE__ )
trainer.save_metrics('eval' , SCREAMING_SNAKE_CASE__ )
# Write model card and (optionally) push to hub
__UpperCamelCase ={
'tasks': 'masked-auto-encoding',
'dataset': data_args.dataset_name,
'tags': ['masked-auto-encoding'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE__ )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 117 | 0 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 304 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if not scores:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , not is_max , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , not is_max , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , not is_max , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , not is_max , scores , height ) , )
    )
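# Worked example with the scores used in main() below: the maximising level above the
# leaves reduces the pairs to (90, 33, 65, 34423), the minimising level turns those
# into (33, 65), and the root maximiser therefore reports 65 as the optimal value.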
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print(F"Optimal value : {minimax(0 , 0 , True , scores , height )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 304 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
A : str = tempfile.mkdtemp()
# fmt: off
A : List[Any] = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
A : Optional[int] = dict(zip(lowerCamelCase__, range(len(lowerCamelCase__ ) ) ) )
A : Optional[Any] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
A : Union[str, Any] = {"""unk_token""": """<unk>"""}
A : List[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
A : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + """\n""" )
with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase__ ) )
A : int = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
A : List[Any] = os.path.join(self.tmpdirname, lowerCamelCase__ )
with open(self.image_processor_file, """w""", encoding="""utf-8""" ) as fp:
json.dump(lowerCamelCase__, lowerCamelCase__ )
    def get_tokenizer( self, **kwargs ):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="""!""", **kwargs )
    def get_rust_tokenizer( self, **kwargs ):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="""!""", **kwargs )
    def get_image_processor( self, **kwargs ):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs )
    def tearDown( self ):
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
A : str = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
A : Optional[int] = [Image.fromarray(np.moveaxis(lowerCamelCase__, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.get_tokenizer()
A : Optional[Any] = self.get_rust_tokenizer()
A : Optional[int] = self.get_image_processor()
A : List[str] = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
A : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase__ )
A : Tuple = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
A : str = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, lowerCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer, lowerCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, lowerCamelCase__ )
self.assertIsInstance(processor_fast.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[str] = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Optional[int] = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" )
A : Tuple = self.get_image_processor(do_normalize=lowerCamelCase__ )
A : Optional[Any] = OwlViTProcessor.from_pretrained(
self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=lowerCamelCase__ )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : str = self.get_tokenizer()
A : List[str] = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = self.prepare_image_inputs()
A : Optional[Any] = image_processor(lowerCamelCase__, return_tensors="""np""" )
A : Any = processor(images=lowerCamelCase__, return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : int = self.get_image_processor()
A : Optional[Any] = self.get_tokenizer()
A : Optional[int] = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = """lower newer"""
A : Union[str, Any] = processor(text=lowerCamelCase__, return_tensors="""np""" )
A : str = tokenizer(lowerCamelCase__, return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist() )
def _lowerCAmelCase ( self ):
A : Tuple = self.get_image_processor()
A : int = self.get_tokenizer()
A : str = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : List[str] = """lower newer"""
A : Any = self.prepare_image_inputs()
A : Tuple = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : str = """google/owlvit-base-patch32"""
A : Dict = OwlViTProcessor.from_pretrained(lowerCamelCase__ )
A : str = ["""cat""", """nasa badge"""]
A : Optional[int] = processor(text=lowerCamelCase__ )
A : Any = 16
self.assertListEqual(list(inputs.keys() ), ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape, (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : Tuple = """google/owlvit-base-patch32"""
A : Any = OwlViTProcessor.from_pretrained(lowerCamelCase__ )
A : int = [["""cat""", """nasa badge"""], ["""person"""]]
A : List[Any] = processor(text=lowerCamelCase__ )
A : Dict = 16
A : List[str] = len(lowerCamelCase__ )
A : List[str] = max([len(lowerCamelCase__ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ), ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape, (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : Dict = """google/owlvit-base-patch32"""
A : int = OwlViTProcessor.from_pretrained(lowerCamelCase__ )
A : str = ["""cat""", """nasa badge"""]
A : Optional[Any] = processor(text=lowerCamelCase__ )
A : int = 16
        input_ids = inputs["""input_ids"""]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
self.assertListEqual(list(inputs.keys() ), ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape, (2, seq_length) )
self.assertListEqual(list(input_ids[0] ), predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ), predicted_ids[1] )
def _lowerCAmelCase ( self ):
A : Tuple = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Optional[Any] = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = self.prepare_image_inputs()
A : Optional[Any] = self.prepare_image_inputs()
A : List[str] = processor(images=lowerCamelCase__, query_images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : Any = self.get_image_processor()
A : Optional[Any] = self.get_tokenizer()
A : List[str] = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : Optional[Any] = processor.batch_decode(lowerCamelCase__ )
A : Union[str, Any] = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
| 115 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_mobilenet_v2""": [
"""MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileNetV2Config""",
"""MobileNetV2OnnxConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:List[Any] = ["""MobileNetV2FeatureExtractor"""]
SCREAMING_SNAKE_CASE_:Tuple = ["""MobileNetV2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
"""MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileNetV2ForImageClassification""",
"""MobileNetV2ForSemanticSegmentation""",
"""MobileNetV2Model""",
"""MobileNetV2PreTrainedModel""",
"""load_tf_weights_in_mobilenet_v2""",
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 115 | 1 |
g = 9.80665  # standard acceleration due to gravity (m/s^2)
def _SCREAMING_SNAKE_CASE ( lowercase : float , lowercase : float , lowercase : float = g ):
'''simple docstring'''
if fluid_density <= 0:
raise ValueError('Impossible fluid density' )
if volume < 0:
raise ValueError('Impossible Object volume' )
if gravity <= 0:
raise ValueError('Impossible Gravity' )
return fluid_density * gravity * volume
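# Illustrative check: 0.5 m^3 of volume in water (density 1000 kg/m^3) under standard
# gravity displaces 1000 * 9.80665 * 0.5 ≈ 4903.3 N of buoyant force.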
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 204 |
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def _SCREAMING_SNAKE_CASE ( lowercase : str = "laptop" ):
'''simple docstring'''
lowerCamelCase_ = f"""https://www.amazon.in/laptop/s?k={product}"""
lowerCamelCase_ = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
lowerCamelCase_ = BeautifulSoup(requests.get(lowercase , headers=lowercase ).text )
# Initialize a Pandas dataframe with the column titles
lowerCamelCase_ = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
lowerCamelCase_ = item.ha.text
lowerCamelCase_ = 'https://www.amazon.in/' + item.ha.a['href']
lowerCamelCase_ = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
lowerCamelCase_ = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
lowerCamelCase_ = 'Not available'
try:
lowerCamelCase_ = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
lowerCamelCase_ = ''
try:
lowerCamelCase_ = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 1_00 )
except ValueError:
lowerCamelCase_ = float('nan' )
except AttributeError:
pass
lowerCamelCase_ = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowerCamelCase_ = ' '
lowerCamelCase_ = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
lowerCamelCase : Tuple = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 204 | 1 |
import string
def decrypt(message: str) -> None:
    for key in range(len(string.ascii_uppercase ) ):
        translated = """"""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F'''Decryption using Key #{key}: {translated}''' )
def main() -> None:
    message = input("""Encrypted message: """ )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
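# Illustrative run (hypothetical input): for the ciphertext "WKLV", the line printed
# for key #3 reads "THIS", since each letter is shifted back by three positions.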
| 223 |
def capitalize_variants(txt: str) -> list:
    """Return the variants of ``txt`` where exactly one alphabetic character is uppercased."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 223 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig ):
'''simple docstring'''
_snake_case : List[str] = """imagegpt"""
_snake_case : Dict = ["""past_key_values"""]
_snake_case : int = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , tie_word_embeddings=False , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class ImageGPTOnnxConfig(OnnxConfig ):
'''simple docstring'''
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
] )
    def generate_dummy_inputs( self , preprocessor , batch_size = 1 , seq_length = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 32 , image_height = 32 , ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(preprocessor(images=input_image , return_tensors=framework ) )
        return inputs
| 29 |
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
_DESCRIPTION = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
_KWARGS_DESCRIPTION = r"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __a (datasets.Metric):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/hendrycks/math""" , codebase_urls=["""https://github.com/hendrycks/math"""] , )
    def _compute( self , predictions , references ):
        """simple docstring"""
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
return {
"accuracy": accuracy,
}
| 132 | 0 |
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    """simple docstring"""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution() -> int:
    """simple docstring"""
    return sum(
        number
        for number in range(1000 , 1000000 )
        if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
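    # Sanity check: 4150 is one of the summed numbers, since
    # 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.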
| 161 |
"""simple docstring"""
class OverFlowError( Exception ):
    '''simple docstring'''
    pass
class UnderFlowError( Exception ):
    '''simple docstring'''
    pass
class FixedPriorityQueue:
    '''simple docstring'''
    def __init__(self) -> None:
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority] ) >= 100:
                raise OverFlowError("Maximum queue size is 100" )
            self.queues[priority].append(data )
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2" )
    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0 )
        raise UnderFlowError("All queues are empty" )
    def __str__(self) -> str:
        return "\n".join(f'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) )
class ElementPriorityQueue:
    '''simple docstring'''
    def __init__(self) -> None:
        self.queue = []
    def enqueue(self, data: int) -> None:
        if len(self.queue ) == 100:
            raise OverFlowError("Maximum queue size is 100" )
        self.queue.append(data )
    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty" )
        else:
            data = min(self.queue )
            self.queue.remove(data )
            return data
    def __str__(self) -> str:
        return str(self.queue )
def fixed_priority_queue() -> None:
    """simple docstring"""
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    """simple docstring"""
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 161 | 1 |
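# Not part of the original row: a quick assertion of the FIFO-within-priority
# behavior the demo above prints. Priority 0 drains first, in insertion order,
# then priority 1, then priority 2.
fpq = FixedPriorityQueue()
for priority, data in [(0, 10), (1, 70), (0, 100), (2, 1), (2, 5)]:
    fpq.enqueue(priority, data)
assert [fpq.dequeue() for _ in range(5)] == [10, 100, 70, 1, 5]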
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
| 154 |
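# Not part of the original row: a minimal usage sketch of the chi-squared
# Caesar breaker above. "khoor zruog" is "hello world" shifted by 3 in a
# lowercase a-z alphabet; for a message this short the statistic is noisy,
# so the recovered shift is likely -- not guaranteed -- to be 3.
shift, chi_squared_value, decoded = decrypt_caesar_with_chi_squared("khoor zruog")
print(shift, chi_squared_value, decoded)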
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""CLIPFeatureExtractor"""]
lowercase__ = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 241 | 0 |
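# Not part of the original row: the lazy-import structure above defers heavy
# framework imports until a symbol is first accessed. A stripped-down sketch of
# the same idea using module-level __getattr__ (PEP 562); names are illustrative.
import importlib

_LAZY_SUBMODULES = {"modeling_clip": ["CLIPModel"]}

def __getattr__(name):
    for submodule, symbols in _LAZY_SUBMODULES.items():
        if name in symbols:
            # import the submodule only now, on first attribute access
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")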
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 351 |
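# Not part of the original row: scraping Yahoo Finance by CSS class is fragile,
# since the class string above changes whenever the page is redesigned. A
# slightly more defensive sketch of the same call (same class assumption),
# returning a placeholder instead of raising when the element is missing.
import requests
from bs4 import BeautifulSoup

def stock_price_safe(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "html.parser")
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    span = div.find("span") if div is not None else None
    return span.text if span is not None else "<price not found>"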
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
| 335 | 0 |
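# Not part of the original row: for contrast, the same odd-even transposition
# sort as a plain single-process loop -- no pipes or locks needed. n phases of
# alternating even/odd adjacent compare-and-swap passes sort n elements.
def odd_even_transposition_sequential(arr: list) -> list:
    for phase in range(len(arr)):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))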
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
UpperCamelCase__ :Union[str, Any] = None
if self.use_attention_mask:
UpperCamelCase__ :Dict = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
UpperCamelCase__ :str = None
if self.use_labels:
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
UpperCamelCase__ :Tuple = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)
    def test_inputs_embeds(self):
        pass

    def test_save_load_fast_init_from_base(self):
        pass

    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 97 |
'''simple docstring'''
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which corresponding characters differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 97 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 97 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Pick a Parquet row-group size that keeps row groups small for media-heavy features."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, **kwargs):
        super().__init__(path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs)
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class ParquetDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf, batch_size: Optional[int] = None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj, batch_size, **parquet_writer_kwargs) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
| 97 | 1 |
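# Not part of the original row: a self-contained sketch of the batched
# ParquetWriter pattern _write uses above, with a plain pyarrow table standing
# in for the Dataset (file name and batch size are illustrative).
import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"idx": list(range(1000))})
writer = pq.ParquetWriter("example.parquet", table.schema)
for offset in range(0, len(table), 100):
    # each write_table call closes out one row group of ~100 rows
    writer.write_table(table.slice(offset, 100))
writer.close()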
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial-division primality check."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime that are a difference of two consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
    print(f"{solution() = }")
| 210 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 210 | 1 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24_000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1_024, codebook_dim=None, use_conv_shortcut=True, **kwargs):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )
        super().__init__(**kwargs)
    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
| 371 |
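# Not part of the original row: the derived quantities above, worked through
# for the default 24 kHz configuration as a sanity check.
# hop_length = prod([8, 5, 4, 2]) = 320 samples per frame
# frame_rate = ceil(24000 / 320) = 75 frames per second
# num_quantizers = int(1000 * 24.0 // (75 * 10)) = int(24000 // 750) = 32
assert 8 * 5 * 4 * 2 == 320
assert -(-24000 // 320) == 75  # ceiling division
assert int(1000 * 24.0 // (75 * 10)) == 32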
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt each character code p as c = (p + k) * k with a random key k."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert c = (p + k) * k via p = (c - k**2) / k."""
        plain = []
        for i in range(len(cipher)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # mapping from the first letter of an edge to the child node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Return (common substring, remaining prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: The word is the prefix of the node
        # Solution: We set the current node as a leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| 10 |
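# Not part of the original row: a tiny illustration of the prefix splitting
# performed by RadixNode.insert (Case 4 above). Inserting "test" then "team"
# splits the shared prefix "te" into its own internal node.
root = RadixNode()
root.insert_many(["test", "team"])
root.print_tree()
# expected shape:
# - te
# -- st  (leaf)
# -- am  (leaf)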
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize_and_center_crop=True, size=None, crop_pct=0.9, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 10 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 354 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # show the progress bar only on the main local process
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
| 1 | 0 |
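# Not part of the original row: the same "main process only" gating, shown with
# plain tqdm and an environment-style rank (LOCAL_RANK here is illustrative of
# how torch-style launchers expose the local process index).
import os
from tqdm.auto import tqdm as _plain_tqdm

def ranked_tqdm(*args, **kwargs):
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    # every process except local rank 0 gets a disabled (silent) bar
    return _plain_tqdm(*args, **kwargs, disable=local_rank != 0)

for _ in ranked_tqdm(range(3), desc="only rank 0 prints"):
    pass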
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = TFLayoutLMModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@unittest.skip("""Onnx compliancy broke with TF 2.10""" )
def UpperCamelCase__ ( self ):
pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] )  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] )  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] )  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))
    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=tf.convert_to_tensor([1, 1]),
        )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)
    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)
    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 287 |
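The 0-1000 box coordinates in the batch above come from normalizing pixel boxes to LayoutLM's coordinate grid. A minimal sketch of the usual helper, in the spirit of the LayoutLM documentation; the function name and int truncation are assumptions, not part of this file:

def normalize_box(box, width, height):
    """Scale pixel coordinates (x0, y0, x1, y1) to LayoutLM's 0-1000 grid."""
    x0, y0, x1, y1 = box
    return [
        int(1000 * x0 / width),
        int(1000 * y0 / height),
        int(1000 * x1 / width),
        int(1000 * y1 / height),
    ]

print(normalize_box((50, 100, 200, 150), width=800, height=600))  # [62, 166, 250, 250]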
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_lowerCamelCase =logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            """The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use OwlViTImageProcessor instead.""", FutureWarning, )
        super().__init__(*args, **kwargs)
| 287 | 1 |
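The snippet above is the standard deprecation-shim pattern: the old class emits a warning and then defers entirely to its replacement. A self-contained sketch with hypothetical class names:

import warnings

class NewImageProcessor:
    """Stand-in for the replacement class."""
    def __init__(self, *args, **kwargs):
        self.args, self.kwargs = args, kwargs

class OldFeatureExtractor(NewImageProcessor):
    """Deprecated alias: warn once, then behave exactly like the new class."""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)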
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained('xlm-roberta-base')
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['last_hidden_state'].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained('xlm-roberta-large')
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['last_hidden_state'].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 365 |
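Both integration tests assert on a slice of the hidden states with an absolute tolerance. A toy illustration of that comparison (the values are made up):

import torch

expected = torch.tensor([-0.0101, 0.1218, -0.0803])
actual = expected + 1e-4          # agrees to within 1e-4 everywhere
assert torch.allclose(actual, expected, atol=1e-3)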
from ....utils import logging
_A = logging.get_logger(__name__)
class MMBTConfig:
    """simple docstring"""
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 117 | 0 |
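The wrapper above mirrors an existing config's attributes through __dict__ and then layers modality-specific fields on top. A runnable sketch of the same idea (the class names are placeholders):

class BaseConfig:
    def __init__(self):
        self.hidden_size = 768

class WrappedConfig:
    """Mirror every attribute of an existing config, then extend it."""
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = dict(config.__dict__)   # copy all base attributes
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels

cfg = WrappedConfig(BaseConfig(), num_labels=3)
print(cfg.hidden_size, cfg.modal_hidden_size, cfg.num_labels)  # 768 2048 3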
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, 'all_results.json')
    if os.path.exists(path):
        with open(path, 'r') as f:
            results = json.load(f)
    else:
        raise ValueError(f"""can't find {path}""")
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == 'cuda'
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    """simple docstring"""
    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, 'default_config.yml')
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]
    @classmethod
    def tearDownClass(cls):
        """simple docstring"""
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def UpperCAmelCase__ ( self :Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : Dict =self.get_auto_remove_tmp_dir()
lowerCamelCase__ : List[str] =f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
lowerCamelCase__ : Optional[int] =get_results(_lowerCAmelCase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , 'glue_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : str =self.get_auto_remove_tmp_dir()
lowerCamelCase__ : List[Any] =f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowerCamelCase__ : Union[str, Any] =get_results(_lowerCAmelCase )
self.assertLess(result['perplexity'] , 100 )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , 'clm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =self.get_auto_remove_tmp_dir()
lowerCamelCase__ : List[Any] =f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowerCamelCase__ : List[str] =get_results(_lowerCAmelCase )
self.assertLess(result['perplexity'] , 42 )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , 'mlm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : List[str] =7 if get_gpu_count() > 1 else 2
lowerCamelCase__ : int =self.get_auto_remove_tmp_dir()
lowerCamelCase__ : Tuple =f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowerCamelCase__ : Union[str, Any] =get_results(_lowerCAmelCase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertLess(result['train_loss'] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , 'ner_no_trainer' ) ) )
@unittest.skip(reason='Fix me @muellerzr' )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : Any =self.get_auto_remove_tmp_dir()
lowerCamelCase__ : Optional[Any] =f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowerCamelCase__ : Union[str, Any] =get_results(_lowerCAmelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['eval_f1'] , 28 )
self.assertGreaterEqual(result['eval_exact'] , 28 )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , 'qa_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
lowerCamelCase__ : Tuple =self.get_auto_remove_tmp_dir()
lowerCamelCase__ : str =f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowerCamelCase__ : Dict =get_results(_lowerCAmelCase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , 'swag_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =self.get_auto_remove_tmp_dir()
lowerCamelCase__ : Optional[Any] =f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowerCamelCase__ : List[str] =get_results(_lowerCAmelCase )
self.assertGreaterEqual(result['eval_rouge1'] , 10 )
self.assertGreaterEqual(result['eval_rouge2'] , 2 )
self.assertGreaterEqual(result['eval_rougeL'] , 7 )
self.assertGreaterEqual(result['eval_rougeLsum'] , 7 )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , 'summarization_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =self.get_auto_remove_tmp_dir()
lowerCamelCase__ : List[str] =f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowerCamelCase__ : Optional[Any] =get_results(_lowerCAmelCase )
self.assertGreaterEqual(result['eval_bleu'] , 30 )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , 'translation_no_trainer' ) ) )
@slow
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =logging.StreamHandler(sys.stdout )
logger.addHandler(_lowerCAmelCase )
lowerCamelCase__ : Any =self.get_auto_remove_tmp_dir()
lowerCamelCase__ : Union[str, Any] =f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
lowerCamelCase__ : int =get_results(_lowerCAmelCase )
self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.10 )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
lowerCamelCase__ : int =self.get_auto_remove_tmp_dir()
lowerCamelCase__ : Optional[Any] =f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
lowerCamelCase__ : Dict =get_results(_lowerCAmelCase )
# The base model scores a 25%
self.assertGreaterEqual(result['eval_accuracy'] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , 'step_1' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , 'image_classification_no_trainer' ) ) )
| 126 |
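Every test above follows the same flow: build a CLI argument list, launch the example script, then assert on the metrics it wrote to disk. A minimal offline sketch of that flow (the inline subprocess stands in for run_command; names and the metric are illustrative):

import json
import os
import subprocess
import sys
import tempfile

with tempfile.TemporaryDirectory() as tmp_dir:
    # Stand-in for `run_command(self._launch_args + testargs)`: run something
    # that writes metrics to <output_dir>/all_results.json.
    out_path = os.path.join(tmp_dir, "all_results.json")
    script = "import json, sys; json.dump({'eval_accuracy': 0.82}, open(sys.argv[1], 'w'))"
    subprocess.check_call([sys.executable, "-c", script, out_path])
    with open(out_path) as f:
        result = json.load(f)
    assert result["eval_accuracy"] >= 0.75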
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 159 | 0 |
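The try/except/else structure above is how diffusers-style packages degrade gracefully when an optional backend is missing. A dependency-free sketch of the same guard:

import importlib.util

class OptionalDependencyNotAvailable(Exception):
    pass

def is_available(package: str) -> bool:
    return importlib.util.find_spec(package) is not None

try:
    if not (is_available("transformers") and is_available("torch")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    print("backends missing: exporting dummy placeholder objects instead")
else:
    print("backends present: real pipeline classes can be imported")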
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''mgp-str''': 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, unk_token='[GO]', bos_token='[GO]', eos_token='[s]', pad_token='[GO]', **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs, )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size(self):
        return len(self.vocab)
    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)
    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        return (vocab_file,)
| 369 |
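MGP-STR's tokenizer is purely character-level: tokenization is just the characters of the string, and the vocab maps single characters to ids. A toy version (the three-entry vocab is made up):

import json

vocab = {"[GO]": 0, "a": 1, "b": 2}
decoder = {v: k for k, v in vocab.items()}

def tokenize(text):
    return list(text)                        # one token per character

def convert_tokens_to_ids(tokens, unk="[GO]"):
    return [vocab.get(t, vocab[unk]) for t in tokens]

ids = convert_tokens_to_ids(tokenize("abba"))
print(ids)                                   # [1, 2, 2, 1]
print("".join(decoder[i] for i in ids))      # abba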
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage) -> str:
    new_name = old_name
    if "patch_embed" in old_name:
        _, layer, _ = old_name.split('.')
        if layer == "0":
            new_name = old_name.replace('0', 'convolution1')
        elif layer == "1":
            new_name = old_name.replace('1', 'batchnorm_before')
        elif layer == "3":
            new_name = old_name.replace('3', 'convolution2')
        else:
            new_name = old_name.replace('4', 'batchnorm_after')
    if "network" in old_name and re.search(r'\d\.\d', old_name):
        two_digit_num = r'\b\d{2}\b'
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r'\d\.\d\d.', old_name).group()
        else:
            match = re.search(r'\d\.\d.', old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, '')
            trimmed_name = trimmed_name.replace('network', match[0] + '.meta4D_layers.blocks.' + match[2:-1])
            new_name = 'intermediate_stages.' + trimmed_name
        else:
            trimmed_name = old_name.replace(match, '')
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace('network', 'meta4D_layers.blocks.' + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace('network', 'meta3D_layers.blocks.' + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace('norm1', 'layernorm1')
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace('norm2', 'layernorm2')
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace('fc1', 'linear_in')
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace('fc2', 'linear_out')
            new_name = 'last_stage.' + trimmed_name
    elif "network" in old_name and re.search(r'.\d.', old_name):
        new_name = old_name.replace('network', 'intermediate_stages')
    if "fc" in new_name:
        new_name = new_name.replace('fc', 'convolution')
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace('norm1', 'batchnorm_before')
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace('norm2', 'batchnorm_after')
    if "proj" in new_name:
        new_name = new_name.replace('proj', 'projection')
    if "dist_head" in new_name:
        new_name = new_name.replace('dist_head', 'distillation_classifier')
    elif "head" in new_name:
        new_name = new_name.replace('head', 'classifier')
    elif "patch_embed" in new_name:
        new_name = 'efficientformer.' + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace('norm', 'layernorm')
        new_name = 'efficientformer.' + new_name
    else:
        new_name = 'efficientformer.encoder.' + new_name
    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = '_'.join(checkpoint_path.split('/')[-1].split('.')[0].split('_')[:-1])
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={'shortest_edge': image_size}, crop_size={'height': crop_size, 'width': crop_size}, resample=pillow_resamplings['bicubic'], )
    pixel_values = processor(images=image, return_tensors='pt').pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings['bicubic']),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ])
    original_pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)
    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328])
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127])
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878])
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""")
    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""")
    processor.save_pretrained(pytorch_dump_path)
    print(f"""Processor successfully saved at {pytorch_dump_path}""")
    if push_to_hub:
        print('Pushing model to the hub...')
        model.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""", commit_message='Add model', use_temp_dir=True, )
        processor.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""", commit_message='Add image processor', use_temp_dir=True, )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 107 | 0 |
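The heart of such conversion scripts is a pure string-to-string key renamer applied over the whole state dict. A toy sketch of that pattern (the two rules are illustrative, not the full EfficientFormer mapping):

def toy_rename_key(old_name: str) -> str:
    """Map a checkpoint key to its new name via substring rewrites."""
    new_name = old_name
    if "patch_embed.0" in old_name:
        new_name = old_name.replace("patch_embed.0", "patch_embed.convolution1")
    if "norm1" in new_name:
        new_name = new_name.replace("norm1", "layernorm1")
    return new_name

state_dict = {"patch_embed.0.weight": 1, "blocks.0.norm1.bias": 2}
converted = {toy_rename_key(k): v for k, v in state_dict.items()}
print(sorted(converted))  # ['blocks.0.layernorm1.bias', 'patch_embed.convolution1.weight']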
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    """vocab_file""": """vocab.txt""",
    """merges_file""": """bpe.codes""",
}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
        """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
    },
    """merges_file""": {
        """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
        """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """vinai/phobert-base""": 2_56,
    """vinai/phobert-large""": 2_56,
}
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[str]="<s>" , UpperCAmelCase : str="</s>" , UpperCAmelCase : Dict="</s>" , UpperCAmelCase : List[str]="<s>" , UpperCAmelCase : Optional[int]="<unk>" , UpperCAmelCase : Any="<pad>" , UpperCAmelCase : int="<mask>" , **UpperCAmelCase : Tuple , ) -> List[Any]:
super().__init__(
bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , **UpperCAmelCase , )
lowerCamelCase__ : Union[str, Any] = vocab_file
lowerCamelCase__ : int = merges_file
lowerCamelCase__ : List[Any] = {}
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : str = 1
lowerCamelCase__ : Optional[int] = 2
lowerCamelCase__ : str = 3
self.add_from_file(UpperCAmelCase )
lowerCamelCase__ : Optional[int] = {v: k for k, v in self.encoder.items()}
with open(UpperCAmelCase , encoding='utf-8' ) as merges_handle:
lowerCamelCase__ : Optional[Any] = merges_handle.read().split('\n' )[:-1]
lowerCamelCase__ : List[str] = [tuple(merge.split()[:-1] ) for merge in merges]
lowerCamelCase__ : str = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
lowerCamelCase__ : str = {}
def A_ ( self : List[str] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ : Any = [self.cls_token_id]
lowerCamelCase__ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A_ ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
def A_ ( self : Union[str, Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
lowerCamelCase__ : Optional[int] = [self.sep_token_id]
lowerCamelCase__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A_ ( self : Dict ) -> Any:
return len(self.encoder )
def A_ ( self : int ) -> Tuple:
return dict(self.encoder , **self.added_tokens_encoder )
def A_ ( self : Any , UpperCAmelCase : Optional[Any] ) -> Tuple:
if token in self.cache:
return self.cache[token]
lowerCamelCase__ : Union[str, Any] = tuple(UpperCAmelCase )
lowerCamelCase__ : Optional[int] = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
lowerCamelCase__ : Dict = get_pairs(UpperCAmelCase )
if not pairs:
return token
while True:
lowerCamelCase__ : Optional[int] = min(UpperCAmelCase , key=lambda UpperCAmelCase : self.bpe_ranks.get(UpperCAmelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase__ , lowerCamelCase__ : Dict = bigram
lowerCamelCase__ : Dict = []
lowerCamelCase__ : str = 0
while i < len(UpperCAmelCase ):
try:
lowerCamelCase__ : Dict = word.index(UpperCAmelCase , UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase__ : Any = j
if word[i] == first and i < len(UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase__ : str = tuple(UpperCAmelCase )
lowerCamelCase__ : List[Any] = new_word
if len(UpperCAmelCase ) == 1:
break
else:
lowerCamelCase__ : int = get_pairs(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = '@@ '.join(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = word[:-4]
lowerCamelCase__ : Any = word
return word
def A_ ( self : int , UpperCAmelCase : List[str] ) -> int:
lowerCamelCase__ : str = []
lowerCamelCase__ : List[str] = re.findall(R'\S+\n?' , UpperCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(UpperCAmelCase ).split(' ' ) ) )
return split_tokens
def A_ ( self : str , UpperCAmelCase : Any ) -> Optional[Any]:
return self.encoder.get(UpperCAmelCase , self.encoder.get(self.unk_token ) )
def A_ ( self : Dict , UpperCAmelCase : Dict ) -> Optional[int]:
return self.decoder.get(UpperCAmelCase , self.unk_token )
def A_ ( self : str , UpperCAmelCase : Union[str, Any] ) -> Dict:
lowerCamelCase__ : Optional[int] = ' '.join(UpperCAmelCase ).replace('@@ ' , '' ).strip()
return out_string
def A_ ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase__ : Optional[int] = os.path.join(
UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase__ : Optional[int] = os.path.join(
UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ):
copyfile(self.vocab_file , UpperCAmelCase )
if os.path.abspath(self.merges_file ) != os.path.abspath(UpperCAmelCase ):
copyfile(self.merges_file , UpperCAmelCase )
return out_vocab_file, out_merge_file
def A_ ( self : List[str] , UpperCAmelCase : str ) -> Optional[int]:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
try:
with open(UpperCAmelCase , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(UpperCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" )
return
lowerCamelCase__ : Tuple = f.readlines()
for lineTmp in lines:
lowerCamelCase__ : Dict = lineTmp.strip()
lowerCamelCase__ : str = line.rfind(' ' )
if idx == -1:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt>\'' )
lowerCamelCase__ : str = line[:idx]
lowerCamelCase__ : int = len(self.encoder )
| 50 |
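The bpe() method above is the classic merge loop: repeatedly find the adjacent symbol pair with the lowest merge rank and fuse it until no ranked pair remains. A compact standalone version (the two-entry merge table is hypothetical, and PhoBERT's '@@' continuation markers are omitted):

def adjacent_pairs(word):
    """All adjacent symbol pairs in the current word."""
    return {(a, b) for a, b in zip(word, word[1:])}

# Hypothetical merge table: lower rank = learned earlier during training.
bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}

def bpe(token):
    word = tuple(token)
    while True:
        candidates = [p for p in adjacent_pairs(word) if p in bpe_ranks]
        if not candidates:
            break
        first, second = min(candidates, key=bpe_ranks.get)
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return " ".join(word)

print(bpe("lower"))  # low e r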
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('''Googling.....''')
url = '''https://www.google.com/search?q=''' + ''' '''.join(sys.argv[1:])
res = requests.get(url, headers={'''User-Agent''': UserAgent().random})
# res.raise_for_status()
with open('''project1a.html''', '''wb''') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
soup = BeautifulSoup(res.text, '''html.parser''')
links = list(soup.select('''.eZt8xd'''))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('''href'''))
else:
webbrowser.open(F'''https://google.com{link.get('href')}''')
| 223 | 0 |
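The scraping logic hinges on BeautifulSoup's CSS selectors. An offline sketch of the same .select() call against canned HTML (the .eZt8xd class name is taken from the script above; the markup is made up):

from bs4 import BeautifulSoup  # assumes beautifulsoup4 is installed

html = '<div><a class="eZt8xd" href="/url?q=https://example.com">Example</a></div>'
soup = BeautifulSoup(html, "html.parser")
for link in soup.select(".eZt8xd"):
    print(link.text, link.get("href"))  # Example /url?q=https://example.com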
"""simple docstring"""
def solution(length: int = 5_0) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
    print(f'''{solution() = }''')
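A brute-force cross-check of the recurrence, assuming the Project Euler 114 reading (red blocks of length at least three, any two blocks separated by at least one grey square); for row lengths 3 through 7 both approaches should give 2, 4, 7, 11, 17:

def brute(length, min_block=3):
    """Enumerate all tilings recursively instead of via the DP table."""
    def count(pos):
        total = 1                       # leave everything from pos onward grey
        for start in range(pos, length):
            for size in range(min_block, length - start + 1):
                total += count(start + size + 1)   # +1 grey square gap
        return total
    return count(0)

assert [brute(n) for n in range(3, 8)] == [2, 4, 7, 11, 17]
print(brute(7))  # 17, matching Project Euler 114's worked example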
| 366 | """simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( __a , unittest.TestCase ):
"""simple docstring"""
lowercase__ = LongformerTokenizer
lowercase__ = True
lowercase__ = LongformerTokenizerFast
lowercase__ = True
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCamelCase =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__UpperCamelCase =dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
__UpperCamelCase =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__UpperCamelCase ={'''unk_token''': '''<unk>'''}
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase__ ) )
def UpperCAmelCase_ ( self : Optional[int] , **UpperCamelCase__ : str ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def UpperCAmelCase_ ( self : List[str] , **UpperCamelCase__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
def UpperCAmelCase_ ( self : int ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCamelCase ='''lower newer'''
__UpperCamelCase =['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__UpperCamelCase =tokenizer.tokenize(UpperCamelCase__ ) # , add_prefix_space=True)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =tokens + [tokenizer.unk_token]
__UpperCamelCase =[0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
__UpperCamelCase =self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=UpperCamelCase__ ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=UpperCamelCase__ ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase =self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__UpperCamelCase =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode(
'''sequence builders''' , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCAmelCase_ ( self : int ) -> Dict:
'''simple docstring'''
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase ='''Encode this sequence.'''
__UpperCamelCase =tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
# Testing spaces after special tokens
__UpperCamelCase ='''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )} ) # mask token has a left space
__UpperCamelCase =tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
__UpperCamelCase ='''Encode <mask> sequence'''
__UpperCamelCase ='''Encode <mask>sequence'''
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ )
__UpperCamelCase =encoded.index(UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ )
__UpperCamelCase =encoded.index(UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase_ ( self : int ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
__UpperCamelCase ='''A, <mask> AllenNLP sentence.'''
__UpperCamelCase =tokenizer_r.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
__UpperCamelCase =tokenizer_p.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
UpperCamelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__UpperCamelCase =json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , UpperCamelCase__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , UpperCamelCase__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , UpperCamelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> int:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCamelCase ='''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__UpperCamelCase =f"""{text_of_1_token} {text_of_1_token}"""
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ) + 1, len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ) + 1, len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ), len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ), len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ) + 1, 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ), 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ), 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
| 85 | 0 |
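The offset assertions above all reduce to one idea: return_offsets_mapping reports each token's character span, and add_prefix_space/trim_offsets decide whether a leading space belongs to that span. A whitespace-tokenizer stand-in for the mapping itself (not the real byte-level tokenizer):

import re

def offsets(text):
    """Character (start, end) span per whitespace-delimited token."""
    return [(m.start(), m.end()) for m in re.finditer(r"\S+", text)]

print(offsets("hello hello"))  # [(0, 5), (6, 11)]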
'''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    '''simple docstring'''
    feature_extractor_class = '''EncodecFeatureExtractor'''
    tokenizer_class = ('''T5Tokenizer''', '''T5TokenizerFast''')
    def __init__(self, feature_extractor, tokenizer):
        """simple docstring"""
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True):
"""simple docstring"""
return self.tokenizer.get_decoder_prompt_ids(task=__lowerCAmelCase , language=__lowerCAmelCase , no_timestamps=__lowerCAmelCase)
def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""sampling_rate""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""text""" , __lowerCAmelCase)
if len(__lowerCAmelCase) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""")
if text is not None:
lowerCAmelCase = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase)
if audio is not None:
lowerCAmelCase = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase)
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowerCAmelCase = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
lowerCAmelCase = audio_inputs["""padding_mask"""]
return inputs
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""padding_mask""" , __lowerCAmelCase)
if len(__lowerCAmelCase) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if audio_values is not None:
return self._decode_audio(__lowerCAmelCase , padding_mask=__lowerCAmelCase)
else:
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None):
"""simple docstring"""
lowerCAmelCase = to_numpy(__lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = audio_values.shape
if padding_mask is None:
return list(__lowerCAmelCase)
lowerCAmelCase = to_numpy(__lowerCAmelCase)
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
lowerCAmelCase = seq_len - padding_mask.shape[-1]
lowerCAmelCase = 1 - self.feature_extractor.padding_value
lowerCAmelCase = np.pad(__lowerCAmelCase , ((0, 0), (0, difference)) , """constant""" , constant_values=__lowerCAmelCase)
lowerCAmelCase = audio_values.tolist()
for i in range(__lowerCAmelCase):
lowerCAmelCase = np.asarray(audio_values[i])[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
lowerCAmelCase = sliced_audio.reshape(__lowerCAmelCase , -1)
return audio_values
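# A self-contained numpy sketch of the padding-stripping idea implemented by
# the last helper above (it appears to mirror transformers'
# MusicgenProcessor._decode_audio): the padding mask is right-padded with the
# *non*-padding value so newly generated frames are kept, then every batch
# item is filtered down to its unpadded samples. Names and shapes here are
# illustrative assumptions, not the library's API.
import numpy as np


def strip_padding_sketch(audio_values: np.ndarray, padding_mask: np.ndarray, padding_value: int = 0) -> list:
    bsz, channels, seq_len = audio_values.shape
    # generated audio can be longer than the mask; extend the mask with the
    # non-padding value so those frames survive the filter below
    difference = seq_len - padding_mask.shape[-1]
    padding_mask = np.pad(
        padding_mask, ((0, 0), (0, difference)), "constant", constant_values=1 - padding_value
    )
    sliced = []
    for i in range(bsz):
        # keep only the positions whose mask entry is not the padding value
        sliced.append(audio_values[i][:, padding_mask[i] != padding_value].reshape(channels, -1))
    return sliced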
| 272 |
'''simple docstring'''
def z_function(input_str: str) -> list[int]:
    '''For each index i > 0, compute the length of the longest substring
    starting at i that is also a prefix of input_str (z_result[0] stays 0).'''
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if the new index's result extends the right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    '''Check whether the current z-box can be extended by one more character.'''
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    '''Count occurrences of `pattern` in `input_str` via the z-function.'''
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # on the concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
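# Quick hand-checked examples for the functions above (z_result[0] is left
# at 0 by this implementation):
if __name__ == "__main__":
    assert z_function("abracadabra") == [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    assert find_pattern("abr", "abracadabra") == 2  # matches at indices 0 and 7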
| 272 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fastaa_timesteps,
    smartaa_timesteps,
    smartaaa_timesteps,
    superaa_timesteps,
    superaaa_timesteps,
)
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 121 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str]=13 , SCREAMING_SNAKE_CASE : Union[str, Any]=10 , SCREAMING_SNAKE_CASE : Union[str, Any]=3 , SCREAMING_SNAKE_CASE : int=2 , SCREAMING_SNAKE_CASE : int=2 , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : int=32 , SCREAMING_SNAKE_CASE : Dict=5 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : Optional[int]=37 , SCREAMING_SNAKE_CASE : Any="gelu" , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE : Dict=10 , SCREAMING_SNAKE_CASE : Optional[int]=0.02 , SCREAMING_SNAKE_CASE : str="divided_space_time" , SCREAMING_SNAKE_CASE : Tuple=None , ):
lowercase__ : List[str] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : List[Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : List[str] = patch_size
lowercase__ : str = num_frames
lowercase__ : List[str] = is_training
lowercase__ : List[str] = use_labels
lowercase__ : int = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : str = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : Tuple = attention_type
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Any = scope
lowercase__ : Optional[int] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
lowercase__ : Union[str, Any] = (image_size // patch_size) ** 2
lowercase__ : Union[str, Any] = (num_frames) * self.num_patches_per_frame + 1
def snake_case ( self : Optional[int] ):
lowercase__ : Any = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : str = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : List[Any] = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Any ):
lowercase__ : Optional[int] = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowercase__ : List[Any] = self.num_labels
return config
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Optional[Any] = TimesformerModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any ):
lowercase__ : List[Any] = TimesformerForVideoClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[str] = model(SCREAMING_SNAKE_CASE )
# verify the logits shape
lowercase__ : List[str] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : List[str] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : int = config_and_inputs
lowercase__ : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowercase_ = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : Dict ):
lowercase__ : Tuple = TimesformerModelTester(self )
lowercase__ : Any = ConfigTester(
self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple=False ):
lowercase__ : Union[str, Any] = copy.deepcopy(SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE ):
lowercase__ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE )
return inputs_dict
def snake_case ( self : Optional[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def snake_case ( self : Any ):
pass
def snake_case ( self : Tuple ):
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) )
def snake_case ( self : Union[str, Any] ):
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Any = [*signature.parameters.keys()]
lowercase__ : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Optional[int] ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[int] = TimesformerModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
if not self.has_attentions:
pass
else:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = True
for model_class in self.all_model_classes:
lowercase__ : List[str] = self.model_tester.seq_length
lowercase__ : Any = self.model_tester.num_frames
lowercase__ : Optional[int] = True
lowercase__ : List[str] = False
lowercase__ : List[Any] = True
lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : List[Any] = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : Dict = True
lowercase__ : int = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Any = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowercase__ : Any = len(SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
lowercase__ : Tuple = True
lowercase__ : Tuple = True
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + 1 , len(SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def snake_case ( self : List[Any] ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.hidden_states
lowercase__ : List[str] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
lowercase__ : int = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Dict = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Tuple = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Optional[Any] = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
lowercase__ : Optional[Any] = np.load(lowerCamelCase__ )
return list(lowerCamelCase__ )
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Dict ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def snake_case ( self : List[Any] ):
lowercase__ : List[Any] = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
SCREAMING_SNAKE_CASE )
lowercase__ : int = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : str = image_processor(video[:8] , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : str = model(**SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : Union[str, Any] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
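# A minimal inference sketch distilled from the integration test above; the
# checkpoint id and normalization come straight from the test, while the
# helper name is made up for illustration.
import torch
from transformers import TimesformerForVideoClassification, VideoMAEImageProcessor


def classify_video_sketch(video_frames):
    # video_frames: list of HxWx3 uint8 numpy arrays, as returned by prepare_video()
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")
    inputs = image_processor(video_frames[:8], return_tensors="pt")  # this checkpoint consumes 8 frames
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 400): Kinetics-400 classes
    return logits.argmax(-1).item()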
| 121 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 139 |
'''simple docstring'''
def odd_even_transposition(arr):
    arr_size = len(arr)
    for pass_num in range(arr_size):
        # alternate between even- and odd-indexed comparison pairs
        for i in range(pass_num % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
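# Sanity check against the built-in sort on random inputs:
if __name__ == "__main__":
    import random

    for _ in range(100):
        data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
        assert odd_even_transposition(list(data)) == sorted(data)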
| 139 | 1 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=5 ) -> Union[str, Any]:
# Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
assert masked_input.count("""<mask>""" ) == 1
lowerCAmelCase__ : Tuple = torch.tensor(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) ).unsqueeze(0 ) # Batch size 1
lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase )[0] # The last hidden-state is the first element of the output tuple
lowerCAmelCase__ : str = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
lowerCAmelCase__ : List[Any] = logits[0, masked_index, :]
lowerCAmelCase__ : List[Any] = logits.softmax(dim=0 )
lowerCAmelCase__ : str = prob.topk(k=__UpperCAmelCase , dim=0 )
lowerCAmelCase__ : int = """ """.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(__UpperCAmelCase ) )] )
lowerCAmelCase__ : Dict = tokenizer.mask_token
lowerCAmelCase__ : Dict = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(""" """ ) ):
lowerCAmelCase__ : Optional[Any] = predicted_token_bpe.replace("""\u2581""" , """ """ )
if " {0}".format(__UpperCAmelCase ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(""" {0}""".format(__UpperCAmelCase ) , __UpperCAmelCase ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(__UpperCAmelCase , __UpperCAmelCase ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
_A = CamembertTokenizer.from_pretrained("""camembert-base""")
_A = CamembertForMaskedLM.from_pretrained("""camembert-base""")
model.eval()
_A = """Le camembert est <mask> :)"""
print(fill_mask(masked_input, model, tokenizer, topk=3))
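# For reference, fill_mask returns a list of (filled_sentence, probability,
# token) triples; with camembert-base the top completion for the prompt above
# is typically "délicieux" at a probability around 0.49 (illustrative — the
# exact ranking depends on the checkpoint revision).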
| 354 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCamelCase ( a_ ):
_lowerCamelCase :Dict = ["image_processor", "tokenizer"]
_lowerCamelCase :Dict = "BlipImageProcessor"
_lowerCamelCase :Any = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = False
super().__init__(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : List[Any] = self.image_processor
def __call__( self : int , UpperCamelCase : ImageInput = None , UpperCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[bool, str, PaddingStrategy] = False , UpperCamelCase : Union[bool, str, TruncationStrategy] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : int = 0 , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[str, TensorType]] = None , **UpperCamelCase : Optional[int] , ) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
lowerCAmelCase__ : Any = self.tokenizer
lowerCAmelCase__ : Optional[int] = self.tokenizer(
text=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , )
return text_encoding
# add pixel_values
lowerCAmelCase__ : Tuple = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase )
if text is not None:
lowerCAmelCase__ : Optional[int] = self.tokenizer(
text=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , )
else:
lowerCAmelCase__ : Tuple = None
if text_encoding is not None:
encoding_image_processor.update(UpperCamelCase )
return encoding_image_processor
def _lowerCAmelCase ( self : int , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : List[str] ) -> Dict:
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] , *UpperCamelCase : Tuple , **UpperCamelCase : List[str] ) -> List[Any]:
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def _lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.tokenizer.model_input_names
lowerCAmelCase__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
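# Hedged usage sketch for a processor of this shape (it matches transformers'
# BlipProcessor: a BlipImageProcessor plus a BERT tokenizer); the checkpoint
# id and the image file name are assumptions for illustration.
from PIL import Image
from transformers import BlipProcessor


def preprocess_sketch():
    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    image = Image.open("photo.jpg").convert("RGB")
    # images alone yield pixel_values; adding text also returns input_ids etc.
    return processor(images=image, text="a photography of", return_tensors="pt")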
| 212 | 0 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Tuple = logging.get_logger(__name__)
lowercase_ : List[Any] = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class __lowerCAmelCase ( UpperCAmelCase__ ):
snake_case_ : Optional[Any] = "autoformer"
snake_case_ : Optional[Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : Optional[Any] , snake_case__ : Optional[int] = None , snake_case__ : Optional[int] = None , snake_case__ : str = "student_t" , snake_case__ : str = "nll" , snake_case__ : int = 1 , snake_case__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , snake_case__ : bool = True , snake_case__ : int = 0 , snake_case__ : int = 0 , snake_case__ : int = 0 , snake_case__ : int = 0 , snake_case__ : Optional[List[int]] = None , snake_case__ : Optional[List[int]] = None , snake_case__ : int = 64 , snake_case__ : int = 2 , snake_case__ : int = 2 , snake_case__ : int = 2 , snake_case__ : int = 2 , snake_case__ : int = 32 , snake_case__ : int = 32 , snake_case__ : str = "gelu" , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , snake_case__ : int = 100 , snake_case__ : float = 0.02 , snake_case__ : bool = True , snake_case__ : Union[str, Any]=True , snake_case__ : int = 10 , snake_case__ : int = 25 , snake_case__ : int = 3 , **snake_case__ : Any , ):
"""simple docstring"""
_UpperCAmelCase = prediction_length
_UpperCAmelCase = context_length if context_length is not None else prediction_length
_UpperCAmelCase = distribution_output
_UpperCAmelCase = loss
_UpperCAmelCase = input_size
_UpperCAmelCase = num_time_features
_UpperCAmelCase = lags_sequence
_UpperCAmelCase = scaling
_UpperCAmelCase = num_dynamic_real_features
_UpperCAmelCase = num_static_real_features
_UpperCAmelCase = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
_UpperCAmelCase = cardinality
else:
_UpperCAmelCase = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
_UpperCAmelCase = embedding_dimension
else:
_UpperCAmelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
_UpperCAmelCase = num_parallel_samples
# Transformer architecture configuration
_UpperCAmelCase = input_size * len(self.lags_sequence ) + self._number_of_features
_UpperCAmelCase = d_model
_UpperCAmelCase = encoder_attention_heads
_UpperCAmelCase = decoder_attention_heads
_UpperCAmelCase = encoder_ffn_dim
_UpperCAmelCase = decoder_ffn_dim
_UpperCAmelCase = encoder_layers
_UpperCAmelCase = decoder_layers
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = encoder_layerdrop
_UpperCAmelCase = decoder_layerdrop
_UpperCAmelCase = activation_function
_UpperCAmelCase = init_std
_UpperCAmelCase = use_cache
# Autoformer
_UpperCAmelCase = label_length
_UpperCAmelCase = moving_average
_UpperCAmelCase = autocorrelation_factor
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
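# Worked example of the derived encoder width: feature_size is
# input_size * len(lags_sequence) + _number_of_features, where the latter
# sums the embedding dims, the dynamic/time/static real features, and the
# two scaling features (loc and scale). Using the upstream class name here,
# since the class above is a name-mangled copy of AutoformerConfig:
if __name__ == "__main__":
    from transformers import AutoformerConfig

    config = AutoformerConfig(
        prediction_length=24,
        num_time_features=2,
        lags_sequence=[1, 2, 3],
        num_static_categorical_features=1,
        cardinality=[10],
        embedding_dimension=[4],
    )
    # input_size=1, so: 1 * 3 lags + (4 + 0 + 2 + 0 + 1 * 2) = 11
    assert config.feature_size == 11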
| 133 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Dict = logging.get_logger(__name__)
lowercase_ : Union[str, Any] = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class __lowerCAmelCase ( UpperCAmelCase__ ):
snake_case_ : int = "ctrl"
snake_case_ : Optional[int] = ["past_key_values"]
snake_case_ : Tuple = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[str] , snake_case__ : List[str]=246_534 , snake_case__ : Optional[Any]=256 , snake_case__ : List[str]=1_280 , snake_case__ : Optional[int]=8_192 , snake_case__ : List[Any]=48 , snake_case__ : Dict=16 , snake_case__ : int=0.1 , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=1e-6 , snake_case__ : Dict=0.02 , snake_case__ : List[Any]=True , **snake_case__ : List[str] , ):
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = n_positions
_UpperCAmelCase = n_embd
_UpperCAmelCase = n_layer
_UpperCAmelCase = n_head
_UpperCAmelCase = dff
_UpperCAmelCase = resid_pdrop
_UpperCAmelCase = embd_pdrop
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = use_cache
super().__init__(**snake_case__ )
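# The attribute_map above lets the legacy names resolve to the canonical
# ones, so both spellings read the same value. Quick check with the upstream
# class (the class above is a name-mangled copy of CTRLConfig):
if __name__ == "__main__":
    from transformers import CTRLConfig

    config = CTRLConfig(n_embd=1280, n_layer=48, n_head=16)
    assert config.hidden_size == config.n_embd == 1280
    assert config.num_hidden_layers == config.n_layer == 48
    assert config.num_attention_heads == config.n_head == 16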
| 133 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __snake_case ( _lowercase , _lowercase , _lowercase , unittest.TestCase):
snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline
snake_case__ : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
snake_case__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
snake_case__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
_lowerCamelCase : str = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
torch.manual_seed(0 )
_lowerCamelCase : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCamelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_lowerCamelCase : Tuple = CLIPTextModel(__lowerCAmelCase )
_lowerCamelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_lowerCamelCase : List[str] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any=0 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCamelCase : List[Any] = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert('''RGB''' )
if str(__lowerCAmelCase ).startswith('''mps''' ):
_lowerCamelCase : str = torch.manual_seed(__lowerCAmelCase )
else:
_lowerCamelCase : List[Any] = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_lowerCamelCase : Any = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : Optional[int] = self.get_dummy_components()
_lowerCamelCase : Union[str, Any] = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase )
_lowerCamelCase : Dict = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : int = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : Dict = sd_pipe(**__lowerCAmelCase ).images
_lowerCamelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase : Optional[Any] = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : Optional[int] = self.get_dummy_components()
_lowerCamelCase : str = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : int = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : str = '''french fries'''
_lowerCamelCase : Optional[Any] = sd_pipe(**__lowerCAmelCase , negative_prompt=__lowerCAmelCase )
_lowerCamelCase : List[Any] = output.images
_lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase : Optional[Any] = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : Any = self.get_dummy_components()
_lowerCamelCase : List[str] = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase )
_lowerCamelCase : List[Any] = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : str = [inputs['''prompt''']] * 2
_lowerCamelCase : Optional[int] = np.array(inputs['''image'''] ).astype(np.floataa ) / 2_55.0
_lowerCamelCase : Dict = torch.from_numpy(__lowerCAmelCase ).unsqueeze(0 ).to(__lowerCAmelCase )
_lowerCamelCase : str = image / 2 + 0.5
_lowerCamelCase : Tuple = image.permute(0 , 3 , 1 , 2 )
_lowerCamelCase : Optional[Any] = image.repeat(2 , 1 , 1 , 1 )
_lowerCamelCase : Dict = sd_pipe(**__lowerCAmelCase ).images
_lowerCamelCase : Optional[Any] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
_lowerCamelCase : List[str] = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : int = self.get_dummy_components()
_lowerCamelCase : List[Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' )
_lowerCamelCase : Optional[int] = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase )
_lowerCamelCase : str = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : Tuple = sd_pipe(**__lowerCAmelCase ).images
_lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
_lowerCamelCase : Any = [round(__lowerCAmelCase , 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(__lowerCAmelCase ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase : Optional[Any] = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.get_dummy_components()
_lowerCamelCase : Optional[int] = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = VaeImageProcessor(do_resize=__lowerCAmelCase , do_normalize=__lowerCAmelCase )
_lowerCamelCase : int = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : List[str] = pipe(**self.get_dummy_inputs_by_type(__lowerCAmelCase , input_image_type='''pt''' ) )[0]
_lowerCamelCase : Dict = components['''vae''']
_lowerCamelCase : Dict = self.get_dummy_inputs_by_type(__lowerCAmelCase , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
_lowerCamelCase : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
_lowerCamelCase : Any = pipe(**__lowerCAmelCase )[0]
_lowerCamelCase : List[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(__lowerCAmelCase , 1E-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[str]=0 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = torch.manual_seed(__lowerCAmelCase )
_lowerCamelCase : List[Any] = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
_lowerCamelCase : str = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_lowerCamelCase : Optional[Any] = self.get_inputs()
_lowerCamelCase : int = pipe(**__lowerCAmelCase ).images
_lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : Union[str, Any] = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__lowerCAmelCase )
_lowerCamelCase : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_lowerCamelCase : Optional[Any] = self.get_inputs()
_lowerCamelCase : Optional[Any] = pipe(**__lowerCAmelCase ).images
_lowerCamelCase : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : List[Any] = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__lowerCAmelCase )
_lowerCamelCase : Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_lowerCamelCase : Optional[Any] = self.get_inputs()
_lowerCamelCase : Optional[int] = pipe(**__lowerCAmelCase ).images
_lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : List[Any] = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 0
def callback_fn(__lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : torch.FloatTensor ) -> None:
_lowerCamelCase : Optional[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_lowerCamelCase : List[str] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
_lowerCamelCase : int = latents[0, -3:, -3:, -1]
_lowerCamelCase : Any = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
_lowerCamelCase : int = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
_lowerCamelCase : Dict = latents[0, -3:, -3:, -1]
_lowerCamelCase : Optional[Any] = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
_lowerCamelCase : Dict = False
_lowerCamelCase : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa )
_lowerCamelCase : Dict = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_lowerCamelCase : int = self.get_inputs()
pipe(**__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCamelCase : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa )
_lowerCamelCase : Union[str, Any] = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCamelCase : Tuple = self.get_inputs()
_lowerCamelCase : Union[str, Any] = pipe(**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
_lowerCamelCase : List[str] = inputs['''image'''].resize((5_0_4, 5_0_4) )
_lowerCamelCase : List[Any] = '''timbrooks/instruct-pix2pix'''
_lowerCamelCase : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__lowerCAmelCase , safety_checker=__lowerCAmelCase , )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_lowerCamelCase : int = pipe(**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = output.images[0]
_lowerCamelCase : Optional[int] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
_lowerCamelCase : str = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
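# End-to-end sketch mirroring the slow test above; the checkpoint and call
# arguments come straight from get_inputs() (note: the pipeline class is
# spelled StableDiffusionInstructPix2PixPipeline upstream).
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image


def edit_image_sketch():
    pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
        "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
    ).to("cuda")
    image = load_image(
        "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
    )
    # image_guidance_scale trades faithfulness to the input image against the edit prompt
    return pipe(
        "turn him into a cyborg",
        image=image,
        num_inference_steps=3,
        guidance_scale=7.5,
        image_guidance_scale=1.0,
    ).images[0]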
| 175 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __snake_case ( _lowercase , unittest.TestCase):
snake_case__ : Optional[int] = BlenderbotSmallTokenizer
snake_case__ : List[str] = False
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
super().setUp()
_lowerCamelCase : str = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
_lowerCamelCase : Any = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : Any = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
_lowerCamelCase : List[str] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
_lowerCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple , **__lowerCAmelCase : List[str] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : int = '''adapt act apte'''
_lowerCamelCase : Tuple = '''adapt act apte'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCamelCase : int = '''adapt act apte'''
_lowerCamelCase : Optional[Any] = ['''adapt''', '''act''', '''ap@@''', '''te''']
_lowerCamelCase : Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[Any] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_lowerCamelCase : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : str = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [1_3_8_4]
_lowerCamelCase : List[str] = '''I am a small frog.'''
_lowerCamelCase : str = tok([src_text] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase )['''input_ids''']
_lowerCamelCase : Any = tok.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
_lowerCamelCase : Optional[Any] = '''I am a small frog .'''
_lowerCamelCase : str = '''.'''
_lowerCamelCase : str = tok(__lowerCAmelCase )['''input_ids''']
_lowerCamelCase : Dict = tok(__lowerCAmelCase )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 175 | 1 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 122 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
def lowerCamelCase__ ( a__ : int , a__ : str , a__ : LevitConfig , a__ : Path , a__ : bool = True ) -> Any:
print(f'''Converting {name}...''' )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
UpperCamelCase_ = timm.create_model("""levit_128s""" , pretrained=a__ )
else:
UpperCamelCase_ = timm.create_model("""levit_128""" , pretrained=a__ )
if hidden_sizes == 192:
UpperCamelCase_ = timm.create_model("""levit_192""" , pretrained=a__ )
if hidden_sizes == 256:
UpperCamelCase_ = timm.create_model("""levit_256""" , pretrained=a__ )
if hidden_sizes == 384:
UpperCamelCase_ = timm.create_model("""levit_384""" , pretrained=a__ )
from_model.eval()
UpperCamelCase_ = LevitForImageClassificationWithTeacher(a__ ).eval()
UpperCamelCase_ = OrderedDict()
UpperCamelCase_ = from_model.state_dict()
UpperCamelCase_ = list(from_model.state_dict().keys() )
UpperCamelCase_ = list(our_model.state_dict().keys() )
print(len(a__ ) , len(a__ ) )
for i in range(len(a__ ) ):
UpperCamelCase_ = weights[og_keys[i]]
our_model.load_state_dict(a__ )
UpperCamelCase_ = torch.randn((2, 3, 224, 224) )
UpperCamelCase_ = from_model(a__ )
UpperCamelCase_ = our_model(a__ ).logits
assert torch.allclose(a__ , a__ ), "The model logits don't match the original one."
UpperCamelCase_ = name
print(a__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
UpperCamelCase_ = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f'''Pushed {checkpoint_name}''' )
def lowerCamelCase__ ( a__ : Path , a__ : str = None , a__ : bool = True ) -> str:
UpperCamelCase_ = """imagenet-1k-id2label.json"""
UpperCamelCase_ = 1000
UpperCamelCase_ = (1, num_labels)
UpperCamelCase_ = """huggingface/label-files"""
UpperCamelCase_ = num_labels
UpperCamelCase_ = json.load(open(hf_hub_download(a__ , a__ , repo_type="""dataset""" ) , """r""" ) )
UpperCamelCase_ = {int(a__ ): v for k, v in idalabel.items()}
UpperCamelCase_ = idalabel
UpperCamelCase_ = {v: k for k, v in idalabel.items()}
UpperCamelCase_ = partial(a__ , num_labels=a__ , idalabel=a__ , labelaid=a__ )
UpperCamelCase_ = {
"""levit-128S""": 128,
"""levit-128""": 128,
"""levit-192""": 192,
"""levit-256""": 256,
"""levit-384""": 384,
}
UpperCamelCase_ = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , a__ , names_to_config[model_name] , a__ , a__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , a__ , a__ , a__ , a__ )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
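# Typical invocation (the script filename is an assumption; in the
# transformers repo this converter lives under models/levit/):
#
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub
#
# Leaving --model_name unset converts and saves every variant listed in
# names_to_config.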
| 122 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_UpperCAmelCase : Any = logging.get_logger(__name__)
_UpperCAmelCase : List[str] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
_UpperCAmelCase : Optional[int] = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
for attribute in key.split('.' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowerCamelCase__ : Dict = 'lm_head'
lowerCamelCase__ : Optional[int] = getattr(_UpperCAmelCase , _UpperCAmelCase )
if weight_type is not None:
lowerCamelCase__ : Any = getattr(_UpperCAmelCase , _UpperCAmelCase ).shape
else:
lowerCamelCase__ : Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowerCamelCase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowerCamelCase__ : int = value
elif weight_type == "weight_v":
lowerCamelCase__ : Any = value
elif weight_type == "bias":
lowerCamelCase__ : Optional[int] = value
else:
lowerCamelCase__ : Dict = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
_UpperCAmelCase : List[str] = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
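
# Hedged usage note (script name and paths are placeholders, not real files): the
# converter is typically run as
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path ./unispeech.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-hf
#
# or driven from Python by calling convert_unispeech_checkpoint(...) directly.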
| 45 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because Lightning requires a forward method
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
_UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_UpperCAmelCase : Any = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
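
# Minimal sketch (toy layers, not Longformer) of the transfer pattern above:
# `load_state_dict` copies parameters between modules whose parameter names and
# shapes match exactly.
def _weight_transfer_demo():
    src = nn.Linear(8, 2)
    dst = nn.Linear(8, 2)
    dst.load_state_dict(src.state_dict())  # dst now holds src's weight and bias
    assert torch.equal(src.weight, dst.weight)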
| 45 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
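
# Illustrative usage sketch (default hyper-parameters, no weights involved):
#
#   config = FocalNetConfig(depths=[2, 2, 6, 2], out_features=["stage2", "stage4"])
#   config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#   config.out_features  # ['stage2', 'stage4'], validated against stage_names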
| 24 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
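
# Sketch only (made-up key in the original naming scheme) of how the rename map is
# applied to a checkpoint dict:
def _rename_demo():
    toy_state_dict = {"network.0.0.dwconv.weight": 0}
    for src, dest in create_rename_keys(toy_state_dict):
        rename_key(toy_state_dict, src, dest)
    # toy_state_dict is now {"swiftformer.encoder.network.0.blocks.0.depth_wise_conv.weight": 0}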
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
a : Union[str, Any] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 265 | 0 |
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
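
# Quick worked check (not in the original module): the first three points lie on one
# line, so AB x AC vanishes; the last triple does not.
if __name__ == "__main__":
    assert are_collinear((0, 0, 0), (1, 1, 1), (3, 3, 3))
    assert not are_collinear((0, 0, 0), (1, 1, 1), (1, 2, 3))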
| 102 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
UpperCamelCase__ = """\
"""
UpperCamelCase__ = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
UpperCamelCase__ = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size=16, add_start_token=True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 102 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
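
# Illustrative note: consumers import from this package unconditionally, e.g.
#   from diffusers.pipelines.controlnet import StableDiffusionControlNetPipeline
# and when torch/transformers are missing, the dummy-object fallback above lets the
# import succeed while deferring a helpful error to first use.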
| 319 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
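
# Sketch only: the reverse_* helper above inverts the corresponding correct_* helper,
# so converting a weight to the original layout and back is lossless.
def _unfold_reorder_demo():
    x = torch.arange(16, dtype=torch.float32).reshape(2, 8)
    roundtrip = correct_unfold_reduction_order(reverse_correct_unfold_reduction_order(x))
    assert torch.equal(roundtrip, x)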
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[f"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 278 | 0 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
| 360 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
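
# Sketch only: the default attention mask built above is simply `input_ids != pad_token_id`.
def _attention_mask_demo():
    input_ids = tf.constant([[5, 6, 1, 1]])  # assume pad_token_id = 1
    return tf.cast(tf.math.not_equal(input_ids, 1), tf.int8)  # [[1, 1, 0, 0]]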
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 35 | 0 |
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 248 |
def solution() -> int:
    """
    Returns the product a * b * c of the Pythagorean triplet (a, b, c) with a + b + c = 1000.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
| 248 | 1 |
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
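
# Illustrative note (not part of the model): with the default mix_ratio = 0.5, the
# forward pass above reduces to a residual average of the two transformers' deltas:
#   out = 0.5 * (t1(x) - x) + 0.5 * (t2(x) - x) + x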
| 354 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 195 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__SCREAMING_SNAKE_CASE =TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__SCREAMING_SNAKE_CASE =TaTokenizerFast
__SCREAMING_SNAKE_CASE ={"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
| 213 | """simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency.
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language setting: no prefix, suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting: no prefix, suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 213 | 1 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
) -> None:
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token"],
        required=True,
        type=str,
        help="RAG model type: rag_sequence, rag_token",
    )
    parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
    parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
    parser.add_argument(
        "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
    )
    parser.add_argument(
        "--generator_tokenizer_name_or_path",
        type=str,
        help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
    )
    parser.add_argument(
        "--question_encoder_tokenizer_name_or_path",
        type=str,
        help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
    )
    parser.add_argument(
        "--config_name_or_path",
        type=str,
        help=(
            "Identifier of the model config to use, if not provided, resolves to a base config for a given"
            " ``model_type``"
        ),
    )

    args = parser.parse_args()

    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)

    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
| 120 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
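# Make the example scripts importable as top-level modules.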
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 120 | 1 |
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by removing exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(list(transposition))) for transposition in num_transpositions)
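# For example, remove_digit(152) == 52: removing one digit yields {52, 12, 15}.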
if __name__ == "__main__":
__import__("doctest").testmod()
| 10 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
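    # Build an undirected adjacency list: each edge [u, v, w] is added in both directions.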
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 10 | 1 |
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = " " ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 0
for index, char in enumerate(UpperCamelCase__ ):
if char == separator:
split_words.append(string[last_index:index] )
__SCREAMING_SNAKE_CASE = index + 1
elif index + 1 == len(UpperCamelCase__ ):
split_words.append(string[last_index : index + 1] )
return split_words
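# For example, split("apple#banana#cherry", "#") == ["apple", "banana", "cherry"].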
if __name__ == "__main__":
from doctest import testmod
testmod()
| 352 |
"""simple docstring"""
from __future__ import annotations
import requests
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
return requests.get(lowerCAmelCase_ ).json()
def UpperCAmelCase__ (lowerCAmelCase_ = 10 ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
__SCREAMING_SNAKE_CASE = requests.get(lowerCAmelCase_ ).json()[:max_stories]
return [get_hackernews_story(lowerCAmelCase_ ) for story_id in story_ids]
def UpperCAmelCase__ (lowerCAmelCase_ = 10 ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = hackernews_top_stories(lowerCAmelCase_ )
return "\n".join("* [{title}]({url})".format(**lowerCAmelCase_ ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 195 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for YOSO model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path) | 86 |
"""simple docstring"""
import math
import sys
def __lowerCAmelCase (_UpperCamelCase ):
if number != int(_UpperCamelCase ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
__lowerCAmelCase : Any = [-1] * (number + 1)
__lowerCAmelCase : List[Any] = 0
for i in range(1 , number + 1 ):
__lowerCAmelCase : List[Any] = sys.maxsize
__lowerCAmelCase : Optional[int] = int(math.sqrt(_UpperCamelCase ) )
for j in range(1 , root + 1 ):
__lowerCAmelCase : Optional[Any] = 1 + answers[i - (j**2)]
__lowerCAmelCase : Any = min(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : List[str] = answer
return answers[number]
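# For example, minimum_squares_to_represent_a_number(12) == 3, since 12 = 4 + 4 + 4.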
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
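# Standard COCO validation image used to sanity-check the converted model.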
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add model", use_temp_dir=True
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add image processor", use_temp_dir=True
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 35 | ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0217_6634E-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.35_58_18,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` from one energy unit to another using the table above."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
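# For example, energy_conversion("joule", "kilojoule", 1000) == 1.0.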
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 | EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count the reversible numbers of the given length, filling `digits` from the outside in."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Project Euler 145: count reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 87 | 1 |
Point3d = tuple[float, float, float]
Vector3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Check whether three 3-D points lie on a single straight line."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
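# For example, are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)) is True.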
| 334 |
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # Fetch a list of articles in JSON format.
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # Each article in the list is a dict.
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
| 334 | 1 |
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Project Euler 5: smallest positive number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
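# For example, solution(10) == 2520, the smallest number divisible by each of 1..10.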
if __name__ == "__main__":
print(f'''{solution() = }''')
| 15 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 128 | 0 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 370 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 170 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
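# the checkpoints use 6 encoder and 6 decoder layers, hence range(6)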
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)

def rename_key(state_dict, old, new):
    # pop the original key and re-insert its value under the HF name
    val = state_dict.pop(old)
    state_dict[new] = val

def rename_backbone_keys(state_dict):
    # move the ResNet weights under the HF "backbone.conv_encoder.model" prefix
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict

def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]

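
# Sketch of the split above (hidden size 256 is assumed from the slicing): the fused
# in_proj_weight has shape (3 * 256, 256) and is cut into three (256, 256) blocks in
# query/key/value order, e.g. in_proj_weight[:256, :] is the query projection matrix.
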
def prepare_img():
    # standard COCO image of two cats, used below to verify the conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im

@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original repository's weights into our Conditional DETR structure."""
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
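
# Example invocation (a sketch — the script and output folder names here are illustrative):
#
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50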
| 14 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
lowerCamelCase_ : Optional[int] = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
lowerCamelCase_ : Optional[Any] = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
lowerCamelCase_ : Optional[int] = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 81 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
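
    # Note: in_channels=9 above reflects how SD inpainting conditions the UNet — the noisy
    # latents (4), the masked-image latents (4) and the downsampled mask (1) are
    # concatenated along the channel axis before each denoising step.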
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
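
        # enable_sequential_cpu_offload() keeps the weights on CPU and streams each submodule
        # to the GPU only for its forward pass, which is what keeps peak VRAM under the bound
        # asserted above (at the cost of slower inference).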
| 367 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
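
# Example invocation (a sketch — the script and output folder names are illustrative):
#
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variation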
| 129 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
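
# Illustration of the vocab format parsed above (a sketch, not a real vocab file): each
# line holds one or more comma-separated surface forms sharing a single token id, e.g. a
# line "こんにちは,コンニチハ" maps both spellings to the same id in `vocab`, while
# `raw_vocab`/`ids_to_tokens` keep the grouped entry for saving and decoding.
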
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
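
    # Byte fallback sketch: a single character that is not in the vocab, not covered by the
    # emoji table, and outside the <KIGOU>/<U2000U2BFF> ranges is emitted as its raw UTF-8
    # bytes — e.g. a 4-byte character like "🙂" (f0 9f 99 82) would become
    # ["<|byte240|>", "<|byte159|>", "<|byte153|>", "<|byte130|>"].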
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
| 303 |
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the ITU-R BT.601 luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a binary (boolean) mask."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation: a pixel turns on if the kernel overlaps any on-pixel."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
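

# A tiny worked example (shapes chosen for illustration): dilating a single set pixel
# with a 3x3 cross turns on its 4-neighbours as well.
#
#   >>> img = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
#   >>> cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
#   >>> dilation(img, cross)
#   array([[0, 1, 0],
#          [1, 1, 1],
#          [0, 1, 0]])
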
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 251 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet
    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 366 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 68 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    # Map every possible byte to a printable unicode character so the BPE never has to
    # operate on whitespace/control bytes directly.
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
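
# A couple of concrete mappings produced above: printable ASCII maps to itself
# (bytes_to_unicode()[ord("A")] == "A"), while the space byte 32 is shifted into a
# printable range and becomes "Ġ" (chr(256 + 32)) — which is why GPT-2/BART merge
# files are full of "Ġ"-prefixed tokens.
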
def get_pairs(word):
    # Return the set of adjacent symbol pairs in a word (represented as a tuple of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
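
# Example: get_pairs(("l", "o", "w", "e", "r")) == {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r")}
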
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
    if token_ids_b is None:
        return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
    cls = [self.cls_token_id]
    sep = [self.sep_token_id]
    return cls + token_ids_a + sep + sep + token_ids_b + sep
def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
    if token_ids_b is None:
        return [1] + ([0] * len(token_ids_a )) + [1]
    return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_b is None:
        return len(cls + token_ids_a + sep ) * [0]
    return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
def prepare_for_tokenization( self , text : str , is_split_into_words : bool = False , **kwargs ) -> tuple:
    add_prefix_space = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
    if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
        text = """ """ + text
    return (text, kwargs)
| 165 |
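# Illustrative sketch: the greedy BPE merge loop above is easier to see on a toy
# vocabulary. Everything below is self-contained and hypothetical -- the two merges
# in `toy_bpe_ranks` are made up for illustration, not the tokenizer's real table.
def _toy_get_pairs(word):
    # All adjacent symbol pairs in the current segmentation.
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

def _toy_bpe(token, toy_bpe_ranks):
    word = tuple(token)
    while len(word) > 1:
        pairs = _toy_get_pairs(word)
        # Pick the lowest-ranked (earliest-learned) merge that is available.
        bigram = min(pairs, key=lambda pair: toy_bpe_ranks.get(pair, float("inf")))
        if bigram not in toy_bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return word

assert _toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1}) == ("low", "e", "r")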
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowerCamelCase (nn.Module ):
lowerCamelCase__ : int
lowerCamelCase__ : int
lowerCamelCase__ : float = 0.0
lowerCamelCase__ : int = 1
lowerCamelCase__ : int = 1
lowerCamelCase__ : bool = True
lowerCamelCase__ : bool = False
lowerCamelCase__ : bool = False
lowerCamelCase__ : bool = False
lowerCamelCase__ : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for i in range(self.num_layers ):
SCREAMING_SNAKE_CASE__ = self.in_channels if i == 0 else self.out_channels
SCREAMING_SNAKE_CASE__ = FlaxResnetBlockaD(
in_channels=__UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = resnets
SCREAMING_SNAKE_CASE__ = attentions
if self.add_downsample:
SCREAMING_SNAKE_CASE__ = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Union[str, Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=True ) -> Any:
SCREAMING_SNAKE_CASE__ = ()
for resnet, attn in zip(self.resnets , self.attentions ):
SCREAMING_SNAKE_CASE__ = resnet(__UpperCAmelCase , __UpperCAmelCase , deterministic=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = attn(__UpperCAmelCase , __UpperCAmelCase , deterministic=__UpperCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
SCREAMING_SNAKE_CASE__ = self.downsamplers_a(__UpperCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class FlaxDownBlock2D(nn.Module ):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup( self ):
        resnets = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , temb , deterministic=True ):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module ):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup( self ):
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , res_hidden_states_tuple , temb , encoder_hidden_states , deterministic=True ):
        for resnet, attn in zip(self.resnets , self.attentions ):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states )
        return hidden_states
class FlaxUpBlock2D(nn.Module ):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup( self ):
        resnets = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , res_hidden_states_tuple , temb , deterministic=True ):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states )
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module ):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup( self ):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        attentions = []
        for _ in range(self.num_layers ):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        self.attentions = attentions

    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        hidden_states = self.resnets[0](hidden_states , temb )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        return hidden_states
| 165 | 1 |
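# Illustrative sketch: the up blocks above consume the skip tensors produced by the
# down blocks, last-in-first-out, concatenating along the channel axis (axis=-1,
# since Flax uses NHWC layout). Shapes here are made up for illustration.
import jax.numpy as jnp

res_hidden_states_tuple = (jnp.ones((1, 8, 8, 64)), jnp.ones((1, 8, 8, 128)))  # from down blocks
hidden_states = jnp.ones((1, 8, 8, 128))  # current up-block activation

# Pop the most recent skip and fuse it with the current activation.
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
assert hidden_states.shape == (1, 8, 8, 256)  # channels add up: 128 + 128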
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class ShapEPipelineOutput(BaseOutput ):
    """Output class for the Shap-E pipelines; `images` holds the rendered views."""
    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline ):
    """Pipeline for generating latent 3D representations (rendered as image frames) from an input image."""

    def __init__( self , prior : PriorTransformer , image_encoder : CLIPVisionModel , image_processor : CLIPImageProcessor , scheduler : HeunDiscreteScheduler , renderer : ShapERenderer , ):
        super().__init__()
        self.register_modules(
            prior=prior , image_encoder=image_encoder , image_processor=image_processor , scheduler=scheduler , renderer=renderer , )

    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        device = torch.device(f"cuda:{gpu_id}" )
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    @property
    def _execution_device( self ):
        if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    def _encode_image( self , image , device , num_images_per_prompt , do_classifier_free_guidance , ):
        if isinstance(image , list ) and isinstance(image[0] , torch.Tensor ):
            image = torch.cat(image , axis=0 ) if image[0].ndim == 4 else torch.stack(image , axis=0 )
        if not isinstance(image , torch.Tensor ):
            image = self.image_processor(image , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
        image = image.to(dtype=self.image_encoder.dtype , device=device )
        image_embeds = self.image_encoder(image )["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # drop the CLS token: batch_size, 256, dim
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and conditional embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds] )
        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image : Union[PIL.Image.Image, List[PIL.Image.Image]] , num_images_per_prompt : int = 1 , num_inference_steps : int = 25 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents : Optional[torch.FloatTensor] = None , guidance_scale : float = 4.0 , frame_size : int = 64 , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        elif isinstance(image , list ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
            batch_size = len(image )
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image )}" )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image , device , num_images_per_prompt , do_classifier_free_guidance )
        # prior
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0] , num_embeddings , embedding_dim )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            noise_pred = self.prior(
                scaled_model_input , timestep=t , proj_embedding=image_embeds , ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2] , dim=2 )  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred , timestep=t , sample=latents , ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents )
        images = []
        for i, latent in enumerate(latents ):
            image_out = self.renderer.decode(
                latent[None, :] , device , size=frame_size , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
            images.append(image_out )
        images = torch.stack(images )
        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image ) for image in images]
        # Offload last model to CPU
        if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images )
| 70 |
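# Illustrative sketch: the classifier-free-guidance step inside the denoising loop
# above is plain tensor arithmetic. Standalone version with dummy tensors (shapes
# are illustrative, not the pipeline's real dimensions):
import torch

guidance_scale = 4.0
noise_pred = torch.randn(2, 16, 32)  # [uncond batch; cond batch] stacked along dim 0

# Split the doubled batch back into unconditional / conditional predictions...
noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
# ...and push the prediction away from the unconditional one.
guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
assert guided.shape == (1, 16, 32)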
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module ):
    """Two parallel Transformer2DModels whose outputs are mixed during inference."""

    def __init__( self , num_attention_heads : int = 16 , attention_head_dim : int = 88 , in_channels : Optional[int] = None , num_layers : int = 1 , dropout : float = 0.0 , norm_num_groups : int = 32 , cross_attention_dim : Optional[int] = None , attention_bias : bool = False , sample_size : Optional[int] = None , num_vector_embeds : Optional[int] = None , activation_fn : str = "geglu" , num_embeds_ada_norm : Optional[int] = None , ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward( self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict : bool = True , ):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return Transformer2DModelOutput(sample=output_states )
| 70 | 1 |
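# Illustrative sketch: the mixing step at the end of `forward` above is a convex
# combination of the two transformers' residuals, followed by the shared skip
# connection. Dummy tensors, shapes made up for illustration:
import torch

mix_ratio = 0.5
input_states = torch.randn(1, 4, 8, 8)
encoded_states = [torch.randn_like(input_states), torch.randn_like(input_states)]  # residuals

output = encoded_states[0] * mix_ratio + encoded_states[1] * (1 - mix_ratio)
output = output + input_states  # add the shared skip connection back
assert output.shape == input_states.shape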
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments ):
    """simple docstring"""
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__( self , **kwargs ):
        # Legacy handling: map deprecated `no_*` flags onto their positive counterparts.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self , positive_arg , not kwargs.pop(deprecated_arg ) )
                logger.warning(
                    f'{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'
                    f' {positive_arg}={kwargs[positive_arg]}' )
        self.torchscript = kwargs.pop("""torchscript""" , self.torchscript )
        self.torch_xla_tpu_print_metrics = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics )
        self.fp16_opt_level = kwargs.pop("""fp16_opt_level""" , self.fp16_opt_level )
        super().__init__(**kwargs )

    torchscript: bool = field(default=False , metadata={"help": "Trace the models using torchscript"} )
    torch_xla_tpu_print_metrics: bool = field(default=False , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
    fp16_opt_level: str = field(
        default="O1" , metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        } , )
    @cached_property
    def _setup_devices( self ) -> Tuple["torch.device", int]:
        requires_backends(self , ["""torch"""] )
        logger.info("""PyTorch: setting up devices""" )
        if not self.cuda:
            device = torch.device("""cpu""" )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu( self ) -> bool:
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx( self ) -> int:
        requires_backends(self , ["""torch"""] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device( self ) -> "torch.device":
        requires_backends(self , ["""torch"""] )
        return self._setup_devices[0]

    @property
    def n_gpu( self ) -> int:
        requires_backends(self , ["""torch"""] )
        return self._setup_devices[1]

    @property
    def is_gpu( self ) -> bool:
        return self.n_gpu > 0
| 267 |
"""simple docstring"""
def binary_and( a : int , b : int ) -> str:
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 98 | 0 |
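# Illustrative check for the string-based AND above. The returned string is
# zero-padded, so compare numerically against Python's built-in `&`:
assert binary_and(25, 32) == "0b000000"
assert int(binary_and(37, 50), 2) == (37 & 50)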
"""simple docstring"""
from math import factorial
A = {str(d): factorial(d) for d in range(10)}
def __A ( a_ :int) -> int:
return sum(DIGIT_FACTORIAL[d] for d in str(_snake_case))
def __A ( ) -> int:
__a : Any = 7 * factorial(9) + 1
return sum(i for i in range(3 , _snake_case) if sum_of_digit_factorial(_snake_case) == i)
if __name__ == "__main__":
print(F'{solution() = }') | 352 |
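# Illustrative check: the classic members of this sequence are 145 and 40585.
assert sum_of_digit_factorial(145) == 145      # 1! + 4! + 5! = 1 + 24 + 120
assert sum_of_digit_factorial(40585) == 40585  # 4! + 0! + 5! + 8! + 5!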
"""simple docstring"""
def __A ( a_ :int , a_ :int) -> str:
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''')
__a : Union[str, Any] = str(bin(a_))[2:] # remove the leading "0b"
__a : Union[str, Any] = str(bin(a_))[2:] # remove the leading "0b"
__a : Optional[Any] = max(len(a_) , len(a_))
return "0b" + "".join(
str(int(char_a != char_b))
for char_a, char_b in zip(a_binary.zfill(a_) , b_binary.zfill(a_)))
if __name__ == "__main__":
import doctest
doctest.testmod() | 188 | 0 |
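# Illustrative check against Python's built-in `^` operator:
assert int(binary_xor(25, 32), 2) == (25 ^ 32)
assert binary_xor(37, 50) == "0b" + format(37 ^ 50, "06b")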
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest( unittest.TestCase ):
"""simple docstring"""
tokenizer_class = JukeboxTokenizer
metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
def test_1b_lyrics_tokenizer( self ):
import torch
tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
tokens = tokenizer(**self.metas )['''input_ids''']
# fmt: off
EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def test_5b_lyrics_tokenizer( self ):
import torch
tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
tokens = tokenizer(**self.metas )['''input_ids''']
# fmt: off
EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 23 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_vit_msn'''] = [
        '''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ViTMSNModel''',
        '''ViTMSNForImageClassification''',
        '''ViTMSNPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 244 | 0 |
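# Illustrative sketch: the `_LazyModule` pattern above defers heavy imports until an
# attribute is first accessed. A minimal stdlib-only version of the same idea using
# PEP 562 module-level __getattr__ (the attribute->module mapping is hypothetical):
import importlib

_LAZY_SUBMODULES = {"json": "json", "csv": "csv"}  # attribute -> module to import lazily

def __getattr__(name):
    if name in _LAZY_SUBMODULES:
        module = importlib.import_module(_LAZY_SUBMODULES[name])
        globals()[name] = module  # cache so __getattr__ isn't hit again
        return module
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")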
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    """simple docstring"""

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config( self ):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , use_stable_embedding=True , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = OpenLlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )["hidden_states"][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class OpenLlamaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            """feature-extraction""": OpenLlamaModel,
            """text-classification""": OpenLlamaForSequenceClassification,
            """text-generation""": OpenLlamaForCausalLM,
            """zero-shot""": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp( self ):
        self.model_tester = OpenLlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    def test_open_llama_sequence_classification_model( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def test_open_llama_sequence_classification_model_for_single_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_multi_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
    def test_save_load_fast_init_from_base( self ):
        pass

    @parameterized.expand([("linear",), ("dynamic",)] )
    def test_model_rope_scaling( self , scaling_type ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )

        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state

        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5 ) )
| 351 |
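# Illustrative sketch: "linear" RoPE scaling in the test above simply stretches the
# position indices by the factor before computing rotary angles, so a model trained
# to N positions can address N * factor. Dimensions below are made up for brevity.
import torch

def rope_angles(positions, dim=8, base=10000.0, scaling_factor=1.0):
    # positions: (seq_len,) integer positions; returns (seq_len, dim // 2) angles.
    positions = positions.float() / scaling_factor  # the "linear scaling" step
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    return torch.outer(positions, inv_freq)

plain = rope_angles(torch.arange(4))
scaled = rope_angles(torch.arange(4) * 10, scaling_factor=10.0)
assert torch.allclose(plain, scaled)  # position 10*p at factor 10 == position p unscaled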
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : list[int] ):
'''simple docstring'''
lowerCAmelCase : str = len(SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if numbers[j] < numbers[i]:
lowerCAmelCase , lowerCAmelCase : Any = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(exchange_sort(unsorted))
| 133 | 0 |
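# Illustrative check: exchange sort agrees with sorted(); it is quadratic, so it is
# fine for teaching but not for large inputs.
assert exchange_sort([5, 2, 9, 1]) == [1, 2, 5, 9]
assert exchange_sort([]) == []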
import argparse
from collections import defaultdict
import yaml
_lowerCamelCase : str = "docs/source/en/_toctree.yml"
def a__ ( UpperCAmelCase : str ) -> List[str]:
UpperCAmelCase : Tuple = defaultdict(UpperCAmelCase )
for doc in model_doc:
counts[doc["local"]] += 1
UpperCAmelCase : Union[str, Any] = [key for key, value in counts.items() if value > 1]
UpperCAmelCase : List[str] = []
for duplicate_key in duplicates:
UpperCAmelCase : str = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(UpperCAmelCase ) > 1:
raise ValueError(
f'''{duplicate_key} is present several times in the documentation table of content at '''
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(UpperCAmelCase , key=lambda UpperCAmelCase : s["title"].lower() )
def a__ ( UpperCAmelCase : str=False ) -> List[Any]:
with open(UpperCAmelCase , encoding='''utf-8''' ) as f:
UpperCAmelCase : Dict = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase : List[Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase : List[Any] = content[api_idx]['''sections''']
# Then to the model doc
UpperCAmelCase : Tuple = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
UpperCAmelCase : Optional[int] = api_doc[model_idx]['''sections''']
UpperCAmelCase : Tuple = [(idx, section) for idx, section in enumerate(UpperCAmelCase ) if '''sections''' in section]
UpperCAmelCase : Optional[int] = False
for idx, modality_doc in modalities_docs:
UpperCAmelCase : str = modality_doc['''sections''']
UpperCAmelCase : Union[str, Any] = clean_model_doc_toc(UpperCAmelCase )
if old_modality_doc != new_modality_doc:
UpperCAmelCase : int = True
if overwrite:
UpperCAmelCase : Any = new_modality_doc
if diff:
if overwrite:
UpperCAmelCase : Tuple = model_doc
UpperCAmelCase : Any = api_doc
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(UpperCAmelCase , allow_unicode=UpperCAmelCase ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_lowerCamelCase : Optional[Any] = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 336 |
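# Illustrative run of `clean_model_doc_toc` above on toy data (entries are made up):
toy_section = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/bert", "title": "BERT"},   # duplicate entry, same title -> merged
    {"local": "model_doc/albert", "title": "ALBERT"},
]
assert clean_model_doc_toc(toy_section) == [
    {"local": "model_doc/albert", "title": "ALBERT"},  # sorted by lower-cased title
    {"local": "model_doc/bert", "title": "BERT"},
]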
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put( self , value ):
        """Called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end( self ):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer ):
    def __init__( self , tokenizer : "AutoTokenizer" , skip_prompt : bool = False , **decode_kwargs ):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put( self , value ):
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError('''TextStreamer only supports batch size 1''' )
        elif len(value.shape ) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith('''\n''' ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(''' ''' ) + 1]
            self.print_len += len(printable_text )
        self.on_finalized_text(printable_text )

    def end( self ):
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ''''''
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True )

    def on_finalized_text( self , text : str , stream_end : bool = False ):
        print(text, flush=True, end='''''' if not stream_end else None )
    def _is_chinese_char( self , cp ):
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False
class TextIteratorStreamer(TextStreamer ):
    def __init__( self , tokenizer : "AutoTokenizer" , skip_prompt : bool = False , timeout : Optional[float] = None , **decode_kwargs ):
        super().__init__(tokenizer , skip_prompt , **decode_kwargs )
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text( self , text : str , stream_end : bool = False ):
        self.text_queue.put(text , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )

    def __iter__( self ):
        return self

    def __next__( self ):
        value = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 336 | 1 |
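# Illustrative usage of TextIteratorStreamer with `model.generate` running in a
# background thread (the "gpt2" checkpoint is just a small placeholder; any causal
# LM works):
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer(["A short story:"], return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)

# generate() blocks, so run it in a thread and consume tokens as they arrive.
thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
thread.start()
for new_text in streamer:
    print(new_text, end="", flush=True)
thread.join()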
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
__lowerCAmelCase = {'''vinai/bartpho-syllable''': 10_24}
class __a ( __UpperCamelCase ):
__lowercase : int = VOCAB_FILES_NAMES
__lowercase : str = PRETRAINED_VOCAB_FILES_MAP
__lowercase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Optional[int] = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(unk_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(unk_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
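    # BARTpho follows the BART special-token format, e.g. (illustrative):
    #   single sequence:   <s> A </s>
    #   pair of sequences: <s> A </s></s> B </s>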
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
    def vocab_size(self) -> int:
        return len(self.fairseq_ids_to_tokens)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (sub-word strings) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
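# Illustrative usage sketch (assuming this is transformers' BartphoTokenizer; the
# checkpoint name comes from the vocab map above):
#
#   tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]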
| 288 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
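# Example invocation (the script filename and the paths below are placeholders):
#
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_checkpoint/bert_model.ckpt \
#       --bert_config_file ./tf2_checkpoint/bert_config.json \
#       --pytorch_dump_path ./converted/pytorch_model.bin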
| 288 | 1 |
import operator as op
SCREAMING_SNAKE_CASE__ = """scaler.pt"""
SCREAMING_SNAKE_CASE__ = """pytorch_model"""
SCREAMING_SNAKE_CASE__ = """random_states"""
SCREAMING_SNAKE_CASE__ = """optimizer"""
SCREAMING_SNAKE_CASE__ = """scheduler"""
SCREAMING_SNAKE_CASE__ = """pytorch_model.bin"""
SCREAMING_SNAKE_CASE__ = """pytorch_model.bin.index.json"""
SCREAMING_SNAKE_CASE__ = """model.safetensors"""
SCREAMING_SNAKE_CASE__ = """model.safetensors.index.json"""
SCREAMING_SNAKE_CASE__ = """1.10.2"""
SCREAMING_SNAKE_CASE__ = """py38"""
SCREAMING_SNAKE_CASE__ = """4.17.0"""
SCREAMING_SNAKE_CASE__ = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
SCREAMING_SNAKE_CASE__ = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
SCREAMING_SNAKE_CASE__ = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
SCREAMING_SNAKE_CASE__ = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
SCREAMING_SNAKE_CASE__ = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
SCREAMING_SNAKE_CASE__ = """2.0.1"""
SCREAMING_SNAKE_CASE__ = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
SCREAMING_SNAKE_CASE__ = ["""default""", """reduce-overhead""", """max-autotune"""]
SCREAMING_SNAKE_CASE__ = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
SCREAMING_SNAKE_CASE__ = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
SCREAMING_SNAKE_CASE__ = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 325 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
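# Arrow keys carry ARROW_KEY_FLAG (bit 8), e.g. KEYMAP["up"] == 65 + 256 == 321, so they
# can never collide with a plain single-byte character code.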
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
b'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
b'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
b'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
b'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
b'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
b'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
b'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from stdin (or the Windows console)."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 188 | 0 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowercase_ = """\
"""
lowercase_ = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
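Concretely, for a tokenized sequence X = (x_1, ..., x_t),
    PPL(X) = exp( -(1/t) * sum_i log p(x_i | x_{<i}) ).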
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
lowercase_ = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 269 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
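# With this pattern, importing the package stays cheap: the submodules registered above
# are only imported when one of their attributes is first accessed, e.g. (illustrative)
#
#   from transformers import EfficientFormerConfig  # resolved lazily via _LazyModule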
| 269 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_A = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    """
    This is a general feature extraction class for speech recognition.
    """

    def __init__(self, feature_size, sampling_rate, padding_value, **kwargs) -> None:
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features,
        padding=True,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features,
        max_length=None,
        pad_to_multiple_of=None,
        truncation=None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
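    # Worked example (illustrative): with feature_size == 1, padding_value == 0.0 and
    # padding_side == "right", padding [1.0, 2.0, 3.0] to max_length=5 yields
    # [1.0, 2.0, 3.0, 0.0, 0.0] with attention_mask [1, 1, 1, 0, 0].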
| 62 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
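# Worked example: height=768, width=768, scale_factor=8 gives 768 // 64 = 12 and the
# function returns (96, 96), the size of the movq latent grid for a 768x768 output.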
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
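    # Worked example: num_inference_steps=100 with strength=0.2 (as in the docstring
    # example above) keeps the last 20 scheduler timesteps (t_start == 80), so only 20
    # denoising steps run on the noised init image.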
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        image,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        strength=0.3,
        num_images_per_prompt=1,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                # classifier-free guidance: extrapolate from the unconditional prediction
                # toward the conditioned one by `guidance_scale`
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 361 |
def lowerCamelCase ( a_ ) -> "list[int]":
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
lowerCAmelCase_ = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
lowerCAmelCase_ = 1
if upper_limit > 0:
lowerCAmelCase_ = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(a_ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
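# For example, catalan_numbers(5) should return [1, 1, 2, 5, 14, 42].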
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
            N = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 14 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCamelCase :
@staticmethod
def _lowercase ( *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : str ) -> Dict:
pass
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
UpperCamelCase : List[str] = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _lowercase ( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict ) -> str:
_a : Union[str, Any] = pipeline(
"""document-question-answering""" , model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
_a : Dict = INVOICE_URL
_a : List[Any] = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ) , UpperCAmelCase__ , """""" ) ) )
_a : List[Any] = "What is the placebo?"
_a : Dict = [
{
"image": load_image(UpperCAmelCase__ ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def _lowercase ( self : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] ) -> Any:
_a : Optional[int] = dqa_pipeline(UpperCAmelCase__ , top_k=2 )
self.assertEqual(
UpperCAmelCase__ , [
[
{"""score""": ANY(UpperCAmelCase__ ), """answer""": ANY(UpperCAmelCase__ ), """start""": ANY(UpperCAmelCase__ ), """end""": ANY(UpperCAmelCase__ )},
{"""score""": ANY(UpperCAmelCase__ ), """answer""": ANY(UpperCAmelCase__ ), """start""": ANY(UpperCAmelCase__ ), """end""": ANY(UpperCAmelCase__ )},
]
]
* 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
_a : List[Any] = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""" )
_a : Optional[int] = INVOICE_URL
_a : Dict = "How many cats are there?"
_a : Optional[Any] = [
{"score": 0.0_0_0_1, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0_0_0_1, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
_a : str = dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__ , decimals=4 ) , UpperCAmelCase__ )
_a : Any = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__ , decimals=4 ) , UpperCAmelCase__ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
_a : Any = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_a : Optional[int] = dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 )
self.assertEqual(UpperCAmelCase__ , [] )
        # We can optionally pass the words and bounding boxes directly
_a : Tuple = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_a : Union[str, Any] = []
_a : List[Any] = []
_a : List[str] = dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , words=UpperCAmelCase__ , boxes=UpperCAmelCase__ , top_k=2 )
self.assertEqual(UpperCAmelCase__ , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
_a : Any = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
_a : Tuple = INVOICE_URL
_a : List[Any] = "What is the invoice number?"
_a : List[Any] = dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
{"""score""": 0.9_9_4_4, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_0_0_9, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
_a : Dict = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
{"""score""": 0.9_9_4_4, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_0_0_9, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
_a : Union[str, Any] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
[
{"""score""": 0.9_9_4_4, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_0_0_9, """answer""": """us-001""", """start""": 16, """end""": 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
_a : str = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=50 , )
_a : int = INVOICE_URL
_a : List[str] = "What is the invoice number?"
_a : Tuple = dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
{"""score""": 0.9_9_7_4, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_9_4_8, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
_a : Tuple = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
{"""score""": 0.9_9_7_4, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_9_4_8, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
_a : List[str] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
[
{"""score""": 0.9_9_7_4, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_9_4_8, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self : Optional[int] ) -> Any:
_a : List[str] = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=UpperCAmelCase__ )
_a : str = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=UpperCAmelCase__ , revision="""3dc6de3""" , )
_a : str = INVOICE_URL
_a : Union[str, Any] = "What is the invoice number?"
_a : List[Any] = dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
{"""score""": 0.4_2_5_1, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_8_1_9, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
_a : Optional[Any] = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
{"""score""": 0.4_2_5_1, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_8_1_9, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
_a : Optional[int] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
[
{"""score""": 0.4_2_5_1, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_8_1_9, """answer""": """1110212019""", """start""": 23, """end""": 23},
]
]
* 2 , )
_a : Any = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ) , UpperCAmelCase__ , """""" ) ) )
# This model should also work if `image` is set to None
_a : Any = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
{"""score""": 0.4_2_5_1, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_8_1_9, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self : Dict ) -> Optional[int]:
_a : Tuple = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=UpperCAmelCase__ )
_a : str = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=UpperCAmelCase__ , revision="""3dc6de3""" , max_seq_len=50 , )
_a : Union[str, Any] = INVOICE_URL
_a : int = "What is the invoice number?"
_a : int = dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
{"""score""": 0.9_9_9_9, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_9_9_8, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
_a : Optional[int] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
[
{"""score""": 0.9_9_9_9, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_9_9_8, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
_a : Optional[Any] = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ) , UpperCAmelCase__ , """""" ) ) )
# This model should also work if `image` is set to None
_a : List[Any] = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
{"""score""": 0.9_9_9_9, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_9_9_8, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
@slow
@require_torch
def _lowercase ( self : Any ) -> int:
_a : Dict = pipeline(
"""document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
_a : List[str] = INVOICE_URL
_a : Dict = "What is the invoice number?"
_a : List[Any] = dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__ , decimals=4 ) , [{"""answer""": """us-001"""}] )
@require_tf
@unittest.skip("""Document question answering not implemented in TF""" )
def _lowercase ( self : List[Any] ) -> Dict:
pass
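# Standalone usage sketch (not part of the test class above): the same pipeline can
# be driven directly. Model name and revision come from the tests; the image path is
# an assumption, and the OCR path needs pytesseract installed.
from transformers import pipeline

dqa = pipeline(
    "document-question-answering",
    model="impira/layoutlm-document-qa",
    revision="3dc6de3",
)
preds = dqa(image="invoice.png", question="What is the invoice number?", top_k=2)
# Each prediction is a dict: {"score": ..., "answer": ..., "start": ..., "end": ...}.
# Passing {"image": None, "word_boxes": ..., "question": ...} skips OCR entirely,
# as the tests above demonstrate.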
| 294 |
lowerCAmelCase__ = 0 # The first color of the flag.
lowerCAmelCase__ = 1 # The second color of the flag.
lowerCAmelCase__ = 2 # The third color of the flag.
lowerCAmelCase__ = (red, white, blue)
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if not sequence:
return []
if len(lowerCamelCase__ ) == 1:
return list(lowerCamelCase__ )
lowercase__ : List[Any] = 0
lowercase__ : Any = len(lowerCamelCase__ ) - 1
lowercase__ : Dict = 0
while mid <= high:
if sequence[mid] == colors[0]:
lowercase__ , lowercase__ : int = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
lowercase__ , lowercase__ : Union[str, Any] = sequence[high], sequence[mid]
high -= 1
else:
lowercase__ : Tuple = F"""The elements inside the sequence must contain only {colors} values"""
raise ValueError(lowerCamelCase__ )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = input('''Enter numbers separated by commas:\n''').strip()
lowerCAmelCase__ = [int(item.strip()) for item in user_input.split(''',''')]
print(f'''{dutch_national_flag_sort(unsorted)}''')
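# A self-contained sketch of the same three-way partition, with conventional names
# for clarity (the ValueError branch of the original is omitted). One pass, O(n)
# time, O(1) extra space:
def dutch_flag_sort_sketch(sequence: list) -> list:
    low, mid, high = 0, 0, len(sequence) - 1
    while mid <= high:
        if sequence[mid] == 0:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == 1:
            mid += 1
        else:  # sequence[mid] == 2
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
    return sequence

assert dutch_flag_sort_sketch([2, 0, 1, 2, 0, 1]) == [0, 0, 1, 1, 2, 2]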
| 130 | 0 |
'''simple docstring'''
from manim import *
class __A ( UpperCamelCase__ ):
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase_ = Rectangle(height=0.25 , width=0.25 )
UpperCAmelCase_ = [mem.copy() for i in range(6 )]
UpperCAmelCase_ = [mem.copy() for i in range(6 )]
UpperCAmelCase_ = VGroup(*__a ).arrange(__a , buff=0 )
UpperCAmelCase_ = VGroup(*__a ).arrange(__a , buff=0 )
UpperCAmelCase_ = VGroup(__a , __a ).arrange(__a , buff=0 )
UpperCAmelCase_ = Text("CPU" , font_size=24 )
UpperCAmelCase_ = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
UpperCAmelCase_ = [mem.copy() for i in range(4 )]
UpperCAmelCase_ = VGroup(*__a ).arrange(__a , buff=0 )
UpperCAmelCase_ = Text("GPU" , font_size=24 )
UpperCAmelCase_ = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
UpperCAmelCase_ = [mem.copy() for i in range(6 )]
UpperCAmelCase_ = VGroup(*__a ).arrange(__a , buff=0 )
UpperCAmelCase_ = Text("Model" , font_size=24 )
UpperCAmelCase_ = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for i, rect in enumerate(__a ):
UpperCAmelCase_ = fill.copy().set_fill(__a , opacity=0.8 )
target.move_to(__a )
model_arr.append(__a )
UpperCAmelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__a )
self.add(*__a , *__a )
UpperCAmelCase_ = [meta_mem.copy() for i in range(6 )]
UpperCAmelCase_ = [meta_mem.copy() for i in range(6 )]
UpperCAmelCase_ = VGroup(*__a ).arrange(__a , buff=0 )
UpperCAmelCase_ = VGroup(*__a ).arrange(__a , buff=0 )
UpperCAmelCase_ = VGroup(__a , __a ).arrange(__a , buff=0 )
UpperCAmelCase_ = Text("Disk" , font_size=24 )
UpperCAmelCase_ = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
disk.move_to([-4, -1.25, 0] )
self.add(__a , __a )
UpperCAmelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase_ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
UpperCAmelCase_ = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__a )
UpperCAmelCase_ = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a ) )
UpperCAmelCase_ = Square(0.3 )
input.set_fill(__a , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , __a , buff=0.5 )
self.play(Write(__a ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=__a , buff=0.02 )
self.play(MoveToTarget(__a ) )
self.play(FadeOut(__a ) )
UpperCAmelCase_ = Arrow(start=__a , end=__a , color=__a , buff=0.5 )
a.next_to(model_arr[0].get_left() , __a , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
UpperCAmelCase_ = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a , run_time=3 ) )
UpperCAmelCase_ = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
self.play(
Write(__a ) , Circumscribe(model_arr[0] , color=__a , **__a ) , Circumscribe(model_cpu_arr[0] , color=__a , **__a ) , Circumscribe(gpu_rect[0] , color=__a , **__a ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
UpperCAmelCase_ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , __a , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
UpperCAmelCase_ = AnimationGroup(
FadeOut(__a , run_time=0.5 ) , MoveToTarget(__a , run_time=0.5 ) , FadeIn(__a , run_time=0.5 ) , lag_ratio=0.2 )
self.play(__a )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
UpperCAmelCase_ = 0.7
self.play(
Circumscribe(model_arr[i] , **__a ) , Circumscribe(cpu_left_col_base[i] , **__a ) , Circumscribe(cpu_left_col_base[i + 1] , color=__a , **__a ) , Circumscribe(gpu_rect[0] , color=__a , **__a ) , Circumscribe(model_arr[i + 1] , color=__a , **__a ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=__a , **__a ) , Circumscribe(cpu_left_col_base[-1] , color=__a , **__a ) , Circumscribe(gpu_rect[0] , color=__a , **__a ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
UpperCAmelCase_ = a_c
UpperCAmelCase_ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(__a ) , FadeOut(__a , run_time=0.5 ) , )
UpperCAmelCase_ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__a , run_time=3 ) , MoveToTarget(__a ) )
self.wait()
| 106 | '''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
SCREAMING_SNAKE_CASE_: Tuple =logging.getLogger(__name__)
SCREAMING_SNAKE_CASE_: Any ={'facebook/bart-base': BartForConditionalGeneration}
SCREAMING_SNAKE_CASE_: int ={'facebook/bart-base': BartTokenizer}
def lowerCAmelCase_ ( ) -> str:
'''simple docstring'''
UpperCAmelCase_ = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
parser.add_argument(
"--validation_file" , type=snake_case_ , default=snake_case_ , help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length" , type=snake_case_ , default=5 , help="The maximum total input sequence length after tokenization." , )
parser.add_argument(
"--num_beams" , type=snake_case_ , default=snake_case_ , help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
) , )
parser.add_argument(
"--model_name_or_path" , type=snake_case_ , help="Path to pretrained model or model identifier from huggingface.co/models." , required=snake_case_ , )
parser.add_argument(
"--config_name" , type=snake_case_ , default=snake_case_ , help="Pretrained config name or path if not the same as model_name" , )
parser.add_argument(
"--device" , type=snake_case_ , default="cpu" , help="Device where the model will be run" , )
parser.add_argument("--output_file_path" , type=snake_case_ , default=snake_case_ , help="Where to store the final ONNX file." )
UpperCAmelCase_ = parser.parse_args()
return args
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : int="cpu" ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = model_dict[model_name].from_pretrained(snake_case_ ).to(snake_case_ )
UpperCAmelCase_ = tokenizer_dict[model_name].from_pretrained(snake_case_ )
if model_name in ["facebook/bart-base"]:
UpperCAmelCase_ = 0
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
return huggingface_model, tokenizer
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : Dict ) -> Dict:
'''simple docstring'''
model.eval()
UpperCAmelCase_ = None
UpperCAmelCase_ = torch.jit.script(BARTBeamSearchGenerator(snake_case_ ) )
with torch.no_grad():
UpperCAmelCase_ = "My friends are cool but they eat too many carbs."
UpperCAmelCase_ = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=10_24 , return_tensors="pt" ).to(model.device )
UpperCAmelCase_ = model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=snake_case_ , max_length=snake_case_ , early_stopping=snake_case_ , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
snake_case_ , (
inputs["input_ids"],
inputs["attention_mask"],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , snake_case_ , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={
"input_ids": {0: "batch", 1: "seq"},
"output_ids": {0: "batch", 1: "seq_out"},
} , example_outputs=snake_case_ , )
logger.info("Model exported to {}".format(snake_case_ ) )
UpperCAmelCase_ = remove_dup_initializers(os.path.abspath(snake_case_ ) )
logger.info("Deduplicated and optimized model written to {}".format(snake_case_ ) )
UpperCAmelCase_ = onnxruntime.InferenceSession(snake_case_ )
UpperCAmelCase_ = ort_sess.run(
snake_case_ , {
"input_ids": inputs["input_ids"].cpu().numpy(),
"attention_mask": inputs["attention_mask"].cpu().numpy(),
"num_beams": np.array(snake_case_ ),
"max_length": np.array(snake_case_ ),
"decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info("Model outputs from torch and ONNX Runtime are similar." )
logger.info("Success." )
def lowerCAmelCase_ ( ) -> int:
'''simple docstring'''
UpperCAmelCase_ = parse_args()
UpperCAmelCase_ = 5
UpperCAmelCase_ = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
UpperCAmelCase_ = torch.device(args.device )
UpperCAmelCase_ , UpperCAmelCase_ = load_model_tokenizer(args.model_name_or_path , snake_case_ )
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
model.to(snake_case_ )
if args.max_length:
UpperCAmelCase_ = args.max_length
if args.num_beams:
UpperCAmelCase_ = args.num_beams
if args.output_file_path:
UpperCAmelCase_ = args.output_file_path
else:
UpperCAmelCase_ = "BART.onnx"
logger.info("Exporting model to ONNX" )
export_and_validate_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if __name__ == "__main__":
main()
| 106 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : int = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] ="""speech_to_text_2"""
__UpperCAmelCase : Any =["""past_key_values"""]
__UpperCAmelCase : Optional[Any] ={"""num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , __a=1_00_00 , __a=6 , __a=20_48 , __a=4 , __a=0.0 , __a=True , __a="relu" , __a=2_56 , __a=0.1 , __a=0.0 , __a=0.0 , __a=0.0_2 , __a=2 , __a=True , __a=1 , __a=0 , __a=2 , __a=10_24 , **__a , ):
__lowerCAmelCase = vocab_size
__lowerCAmelCase = d_model
__lowerCAmelCase = decoder_ffn_dim
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = activation_function
__lowerCAmelCase = init_std
__lowerCAmelCase = decoder_layerdrop
__lowerCAmelCase = use_cache
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCAmelCase = max_target_positions
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
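# Minimal instantiation sketch, assuming the class above mirrors
# transformers.Speech2Text2Config (same defaults and attribute_map):
from transformers import Speech2Text2Config

config = Speech2Text2Config(d_model=256, decoder_layers=6)
# attribute_map aliases the generic names onto the decoder-specific ones:
assert config.hidden_size == config.d_model
assert config.num_attention_heads == config.decoder_attention_heads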
| 57 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
A : Dict = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[float] =field(
default=0.0 ,metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
__UpperCAmelCase : bool =field(default=lowerCAmelCase__ ,metadata={"""help""": """Whether to use the SortishSampler or not."""} )
__UpperCAmelCase : bool =field(
default=lowerCAmelCase__ ,metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
__UpperCAmelCase : bool =field(default=lowerCAmelCase__ ,metadata={"""help""": """Whether to use Adafactor."""} )
__UpperCAmelCase : Optional[float] =field(
default=lowerCAmelCase__ ,metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
__UpperCAmelCase : Optional[float] =field(
default=lowerCAmelCase__ ,metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
__UpperCAmelCase : Optional[float] =field(default=lowerCAmelCase__ ,metadata={"""help""": """Dropout probability. Goes into model.config."""} )
__UpperCAmelCase : Optional[float] =field(
default=lowerCAmelCase__ ,metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
__UpperCAmelCase : Optional[str] =field(
default="""linear""" ,metadata={"""help""": F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} ,)
| 57 | 1 |
'''simple docstring'''
import itertools
import os
import re
__a: Any = re.compile(R"""([A-Z]+)([A-Z][a-z])""")
__a: Optional[Any] = re.compile(R"""([a-z\d])([A-Z])""")
__a: Optional[Any] = re.compile(R"""(?<!_)_(?!_)""")
__a: List[Any] = re.compile(R"""(_{2,})""")
__a: str = R"""^\w+(\.\w+)*$"""
__a: List[Any] = R"""<>:/\|?*"""
def __UpperCamelCase ( UpperCAmelCase ):
lowercase__ : List[Any] = _uppercase_uppercase_re.sub(r'''\1_\2''' , lowerCAmelCase__ )
lowercase__ : List[Any] = _lowercase_uppercase_re.sub(r'''\1_\2''' , lowerCAmelCase__ )
return name.lower()
def __UpperCamelCase ( UpperCAmelCase ):
lowercase__ : Optional[int] = _single_underscore_re.split(lowerCAmelCase__ )
lowercase__ : Optional[Any] = [_multiple_underscores_re.split(lowerCAmelCase__ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowerCAmelCase__ ) if n != '''''' )
def __UpperCamelCase ( UpperCAmelCase ):
if os.path.basename(lowerCAmelCase__ ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
return camelcase_to_snakecase(lowerCAmelCase__ )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
if os.path.basename(lowerCAmelCase__ ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
if not re.match(_split_re , lowerCAmelCase__ ):
raise ValueError(F"""Split name should match \'{_split_re}\'\' but got \'{split}\'.""" )
return F"""{filename_prefix_for_name(lowerCAmelCase__ )}-{split}"""
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ):
lowercase__ : str = filename_prefix_for_split(lowerCAmelCase__ , lowerCAmelCase__ )
if filetype_suffix:
prefix += F""".{filetype_suffix}"""
lowercase__ : Tuple = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
return F"""{filepath}*"""
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None ):
lowercase__ : Optional[int] = filename_prefix_for_split(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__ : int = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
if shard_lengths:
lowercase__ : Tuple = len(lowerCAmelCase__ )
lowercase__ : str = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(lowerCAmelCase__ )]
if filetype_suffix:
lowercase__ : Tuple = [filename + F""".{filetype_suffix}""" for filename in filenames]
return filenames
else:
lowercase__ : List[str] = prefix
if filetype_suffix:
filename += F""".{filetype_suffix}"""
return [filename]
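# Worked sketch of the camel->snake conversion defined above, with the two regex
# passes inlined; this is a re-implementation for illustration, not a call into the
# obfuscated helpers:
import re

_up_up = re.compile(r"([A-Z]+)([A-Z][a-z])")
_low_up = re.compile(r"([a-z\d])([A-Z])")

def camelcase_to_snakecase_sketch(name: str) -> str:
    name = _up_up.sub(r"\1_\2", name)   # "HTMLParser" -> "HTML_Parser"
    name = _low_up.sub(r"\1_\2", name)  # "SquadV2"    -> "Squad_V2"
    return name.lower()

assert camelcase_to_snakecase_sketch("SquadV2") == "squad_v2"
assert camelcase_to_snakecase_sketch("HTMLParser") == "html_parser"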
| 361 | '''simple docstring'''
class UpperCAmelCase :
'''simple docstring'''
def __init__( self ) -> List[str]:
lowercase__ : Dict = {}
def _lowerCAmelCase( self ) -> None:
print(self.vertex )
for i in self.vertex:
print(__lowerCAmelCase , ''' -> ''' , ''' -> '''.join([str(__lowerCAmelCase ) for j in self.vertex[i]] ) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> None:
# check if the vertex is already present
if from_vertex in self.vertex:
self.vertex[from_vertex].append(__lowerCAmelCase )
else:
# else make a new vertex
lowercase__ : Union[str, Any] = [to_vertex]
def _lowerCAmelCase( self ) -> None:
# visited array for storing already visited nodes
lowercase__ : str = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> None:
# mark start vertex as visited
lowercase__ : List[str] = True
print(__lowerCAmelCase , end=''' ''' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
__a: Optional[Any] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
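# For deep graphs the recursive traversal above can hit Python's recursion limit.
# An equivalent iterative sketch with an explicit stack (conventional names; the
# adjacency dict plays the role of self.vertex above):
def dfs_iterative_sketch(adjacency: dict) -> list:
    visited, order = set(), []
    for start in adjacency:
        stack = [start]
        while stack:
            node = stack.pop()
            if node in visited:
                continue
            visited.add(node)
            order.append(node)
            # push neighbors reversed so they pop in insertion order
            stack.extend(reversed(adjacency.get(node, [])))
    return order

assert dfs_iterative_sketch({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}) == [0, 1, 2, 3]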
| 214 | 0 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowercase__ : Any = logging.getLogger()
def SCREAMING_SNAKE_CASE_ ( ) -> str:
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
lowerCAmelCase = parser.parse_args()
return args.f
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str:
lowerCAmelCase = {}
lowerCAmelCase = os.path.join(snake_case__ , '''all_results.json''' )
if os.path.exists(snake_case__ ):
with open(snake_case__ , '''r''' ) as f:
lowerCAmelCase = json.load(snake_case__ )
else:
raise ValueError(f"can\'t find {path}" )
return results
def SCREAMING_SNAKE_CASE_ ( ) -> List[str]:
lowerCAmelCase = torch.cuda.is_available() and torch_device == "cuda"
return is_using_cuda and is_apex_available()
lowercase__ : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase_ ( _lowercase ):
"""simple docstring"""
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) ->str:
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowerCAmelCase = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) ->Dict:
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(__lowerCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(__lowerCamelCase )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(__lowerCamelCase )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
# With so little data, distributed training needs more epochs to get the score on par with 0/1 GPU
lowerCAmelCase = 7 if get_gpu_count() > 1 else 2
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(__lowerCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(__lowerCamelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(__lowerCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(__lowerCamelCase )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(__lowerCamelCase )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''translation_no_trainer''' ) ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
lowerCAmelCase = logging.StreamHandler(sys.stdout )
logger.addHandler(__lowerCamelCase )
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(__lowerCamelCase )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.1_0 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(__lowerCamelCase )
# The base model scores 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , '''image_classification_no_trainer''' ) ) )
| 338 | import numpy as np
def _UpperCamelCase ( snake_case__ ) -> np.ndarray:
return 1 / (1 + np.exp(-vector ))
def _UpperCamelCase ( snake_case__ ) -> np.ndarray:
return vector * sigmoid(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
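# Quick numeric check of the two activations above: swish(x) = x * sigmoid(x) is
# also known as SiLU. sigmoid(0) = 0.5, so swish(0) = 0; sigmoid(1) ~= 0.7311.
import numpy as np

x = np.array([-1.0, 0.0, 1.0])
sig = 1 / (1 + np.exp(-x))
swish = x * sig
assert np.isclose(sig[1], 0.5) and np.isclose(swish[1], 0.0)
assert np.isclose(swish[2], 0.731058, atol=1e-5)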
| 157 | 0 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCamelCase_ = logging.get_logger(__name__)
class a_ ( a_ ):
'''simple docstring'''
__a: List[Any] = ['''input_values''', '''attention_mask''']
def __init__( self , lowercase_ = 1 , lowercase_ = 1_6_0_0_0 , lowercase_ = 0.0 , lowercase_ = False , lowercase_ = 8_0 , lowercase_ = 1_6 , lowercase_ = 6_4 , lowercase_ = "hann_window" , lowercase_ = 1.0 , lowercase_ = 8_0 , lowercase_ = 7_6_0_0 , lowercase_ = 1e-10 , lowercase_ = 2 , lowercase_ = True , **lowercase_ , ) -> Dict:
'''simple docstring'''
super().__init__(feature_size=lowercase_ , sampling_rate=lowercase_ , padding_value=lowercase_ , **lowercase_ )
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = return_attention_mask
lowerCAmelCase_ = num_mel_bins
lowerCAmelCase_ = hop_length
lowerCAmelCase_ = win_length
lowerCAmelCase_ = win_function
lowerCAmelCase_ = frame_signal_scale
lowerCAmelCase_ = fmin
lowerCAmelCase_ = fmax
lowerCAmelCase_ = mel_floor
lowerCAmelCase_ = reduction_factor
lowerCAmelCase_ = win_length * sampling_rate // 1_0_0_0
lowerCAmelCase_ = hop_length * sampling_rate // 1_0_0_0
lowerCAmelCase_ = optimal_fft_length(self.sample_size )
lowerCAmelCase_ = (self.n_fft // 2) + 1
lowerCAmelCase_ = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowercase_ )
lowerCAmelCase_ = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , lowercase_ , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , lowercase_ , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _lowercase ( lowercase_ , lowercase_ , lowercase_ = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
lowerCAmelCase_ = np.array(lowercase_ , np.intaa )
lowerCAmelCase_ = []
for vector, length in zip(lowercase_ , attention_mask.sum(-1 ) ):
lowerCAmelCase_ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
lowerCAmelCase_ = padding_value
normed_input_values.append(lowercase_ )
else:
lowerCAmelCase_ = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def _lowercase ( self , lowercase_ , ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ = spectrogram(
lowercase_ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> BatchFeature:
'''simple docstring'''
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
lowerCAmelCase_ = self._process_audio(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ , )
else:
lowerCAmelCase_ = None
if audio_target is not None:
lowerCAmelCase_ = self._process_audio(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ , )
if inputs is None:
return inputs_target
else:
lowerCAmelCase_ = inputs_target['input_values']
lowerCAmelCase_ = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
lowerCAmelCase_ = decoder_attention_mask
return inputs
def _lowercase ( self , lowercase_ , lowercase_ = False , lowercase_ = False , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> BatchFeature:
'''simple docstring'''
lowerCAmelCase_ = isinstance(lowercase_ , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase_ = is_batched_numpy or (
isinstance(lowercase_ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase_ = [np.asarray(lowercase_ , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(lowercase_ , np.ndarray ):
lowerCAmelCase_ = np.asarray(lowercase_ , dtype=np.floataa )
elif isinstance(lowercase_ , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ = speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase_ = [speech]
# needed to make pad() work on spectrogram inputs
lowerCAmelCase_ = self.feature_size
# convert into correct format for padding
if is_target:
lowerCAmelCase_ = [self._extract_mel_features(lowercase_ ) for waveform in speech]
lowerCAmelCase_ = BatchFeature({'input_values': features} )
lowerCAmelCase_ = self.num_mel_bins
else:
lowerCAmelCase_ = BatchFeature({'input_values': speech} )
lowerCAmelCase_ = self.pad(
lowercase_ , padding=lowercase_ , max_length=lowercase_ , truncation=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , )
lowerCAmelCase_ = feature_size_hack
# convert input values to correct format
lowerCAmelCase_ = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray ):
lowerCAmelCase_ = [np.asarray(lowercase_ , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(lowercase_ , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
lowerCAmelCase_ = [array.astype(np.floataa ) for array in input_values]
elif isinstance(lowercase_ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ = input_values.astype(np.floataa )
# convert attention_mask to correct format
lowerCAmelCase_ = padded_inputs.get('attention_mask' )
if attention_mask is not None:
lowerCAmelCase_ = [np.asarray(lowercase_ , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
lowerCAmelCase_ = (
attention_mask
if self._get_padding_strategies(lowercase_ , max_length=lowercase_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowerCAmelCase_ = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=lowercase_ , padding_value=self.padding_value )
if return_tensors is not None:
lowerCAmelCase_ = padded_inputs.convert_to_tensors(lowercase_ )
return padded_inputs
def _lowercase ( self ) -> Dict[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = super().to_dict()
# Don't serialize these as they are derived from the other properties.
lowerCAmelCase_ = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
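# Usage sketch, assuming the extractor above mirrors
# transformers.SpeechT5FeatureExtractor (16 kHz input, 80 mel bins for targets):
import numpy as np
from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()
waveform = np.zeros(16000, dtype=np.float32)  # one second of silence
inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
print(inputs["input_values"].shape)   # (1, 16000): raw waveform path
print(targets["input_values"].shape)  # (1, n_frames, 80): log-mel target path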
| 14 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class a_ :
'''simple docstring'''
__a: int
__a: int
class a_ :
'''simple docstring'''
def __init__( self , lowercase_ ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = [[] for _ in range(lowercase_ )]
lowerCAmelCase_ = size
def __getitem__( self , lowercase_ ) -> Iterator[Edge]:
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
return self._size
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(lowercase_ , lowercase_ ) )
def _lowercase ( self , lowercase_ , lowercase_ ) -> int | None:
'''simple docstring'''
lowerCAmelCase_ = deque([start_vertex] )
lowerCAmelCase_ = [None] * self.size
lowerCAmelCase_ = 0
while queue:
lowerCAmelCase_ = queue.popleft()
lowerCAmelCase_ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowerCAmelCase_ = current_distance + edge.weight
lowerCAmelCase_ = distances[edge.destination_vertex]
if (
isinstance(lowercase_ , lowercase_ )
and new_distance >= dest_vertex_distance
):
continue
lowerCAmelCase_ = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
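# A self-contained sketch of the 0-1 BFS idea implemented above: weight-0 edges go
# to the front of the deque, weight-1 edges to the back, yielding shortest paths in
# O(V + E) without a priority queue. adj[u] holds (vertex, weight) pairs.
from collections import deque

def zero_one_bfs_sketch(adj: list, start: int, finish: int):
    dist = [None] * len(adj)
    dist[start] = 0
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:
            nd = dist[u] + w
            if dist[v] is None or nd < dist[v]:
                dist[v] = nd
                if w == 0:
                    dq.appendleft(v)
                else:
                    dq.append(v)
    return dist[finish]

assert zero_one_bfs_sketch([[(1, 0), (2, 1)], [(2, 1)], []], 0, 2) == 1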
| 14 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__snake_case = """"""
__snake_case = """"""
__snake_case = """"""
__snake_case = 1 # (0 is vertical, 1 is horizontal)
def _A ( ):
UpperCamelCase , UpperCamelCase :int = get_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print('''Processing...''' )
UpperCamelCase , UpperCamelCase , UpperCamelCase :Union[str, Any] = update_image_and_anno(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for index, image in enumerate(SCREAMING_SNAKE_CASE__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
UpperCamelCase :List[str] = random_chars(32 )
UpperCamelCase :Any = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
UpperCamelCase :Any = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
cva.imwrite(F'''{file_root}.jpg''' , SCREAMING_SNAKE_CASE__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Success {index+1}/{len(SCREAMING_SNAKE_CASE__ )} with {file_name}''' )
UpperCamelCase :Dict = []
for anno in new_annos[index]:
UpperCamelCase :List[str] = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(SCREAMING_SNAKE_CASE__ )
with open(F'''{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
UpperCamelCase :str = []
UpperCamelCase :Optional[Any] = []
for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE__ , '''*.txt''' ) ):
UpperCamelCase :Tuple = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(SCREAMING_SNAKE_CASE__ ) as in_file:
UpperCamelCase :Tuple = in_file.readlines()
UpperCamelCase :Dict = os.path.join(SCREAMING_SNAKE_CASE__ , F'''{label_name}.jpg''' )
UpperCamelCase :Dict = []
for obj_list in obj_lists:
UpperCamelCase :int = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(SCREAMING_SNAKE_CASE__ )
labels.append(SCREAMING_SNAKE_CASE__ )
return img_paths, labels
def _A ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int = 1 ):
UpperCamelCase :Optional[int] = []
UpperCamelCase :str = []
UpperCamelCase :Any = []
for idx in range(len(SCREAMING_SNAKE_CASE__ ) ):
UpperCamelCase :Any = []
UpperCamelCase :Tuple = img_list[idx]
path_list.append(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[Any] = anno_list[idx]
UpperCamelCase :List[Any] = cva.imread(SCREAMING_SNAKE_CASE__ )
if flip_type == 1:
UpperCamelCase :Optional[Any] = cva.flip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for bbox in img_annos:
UpperCamelCase :Union[str, Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
UpperCamelCase :Union[str, Any] = cva.flip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for bbox in img_annos:
UpperCamelCase :Tuple = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(SCREAMING_SNAKE_CASE__ )
new_imgs_list.append(SCREAMING_SNAKE_CASE__ )
return new_imgs_list, new_annos_lists, path_list
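# Worked example of the flip arithmetic above: YOLO boxes are
# (class, x_center, y_center, width, height) normalized to [0, 1], so a horizontal
# flip only needs x_center -> 1 - x_center (a vertical flip adjusts y_center).
box = [0, 0.25, 0.40, 0.10, 0.20]
h_flipped = [box[0], 1 - box[1], box[2], box[3], box[4]]
assert h_flipped == [0, 0.75, 0.40, 0.10, 0.20]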
def _A ( SCREAMING_SNAKE_CASE__ : int = 32 ):
assert number_char > 1, "The number of characters should be greater than 1"
UpperCamelCase :List[Any] = ascii_lowercase + digits
return "".join(random.choice(SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 259 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] =ViTImageProcessor if is_vision_available() else None
@property
def UpperCAmelCase ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Union[str, Any] = (3, 32, 128)
UpperCamelCase :Any = tempfile.mkdtemp()
# fmt: off
UpperCamelCase :int = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
UpperCamelCase :Optional[int] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
UpperCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
UpperCamelCase :Tuple = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
UpperCamelCase :str = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> int:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Dict = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
UpperCamelCase :List[Any] = Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) )
return image_input
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :str = self.get_tokenizer()
UpperCamelCase :Union[str, Any] = self.get_image_processor()
UpperCamelCase :List[Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase :Dict = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Optional[int] = self.get_tokenizer()
UpperCamelCase :Dict = self.get_image_processor()
UpperCamelCase :List[Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase :Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase :Optional[Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
UpperCamelCase :int = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :Tuple = self.get_image_processor()
UpperCamelCase :List[str] = self.get_tokenizer()
UpperCamelCase :str = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = self.prepare_image_inputs()
UpperCamelCase :List[str] = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
UpperCamelCase :Optional[Any] = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Optional[Any] = self.get_image_processor()
UpperCamelCase :Union[str, Any] = self.get_tokenizer()
UpperCamelCase :int = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = '''test'''
UpperCamelCase :Optional[int] = processor(text=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[str] = self.get_image_processor()
UpperCamelCase :Tuple = self.get_tokenizer()
UpperCamelCase :Union[str, Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = '''test'''
UpperCamelCase :str = self.prepare_image_inputs()
UpperCamelCase :Dict = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Optional[Any] = self.get_image_processor()
UpperCamelCase :Any = self.get_tokenizer()
UpperCamelCase :Union[str, Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase :Union[str, Any] = processor.char_decode(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :List[Any] = self.get_image_processor()
UpperCamelCase :Optional[Any] = self.get_tokenizer()
UpperCamelCase :Any = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = None
UpperCamelCase :List[Any] = self.prepare_image_inputs()
UpperCamelCase :Union[str, Any] = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :str = self.get_image_processor()
UpperCamelCase :Tuple = self.get_tokenizer()
UpperCamelCase :Optional[int] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = torch.randn(1 , 27 , 38 )
UpperCamelCase :Union[str, Any] = torch.randn(1 , 27 , 5_0257 )
UpperCamelCase :Optional[Any] = torch.randn(1 , 27 , 3_0522 )
UpperCamelCase :Optional[Any] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 259 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : List[Any] = ['''image_processor''', '''tokenizer''']
_UpperCamelCase : Optional[int] = '''CLIPImageProcessor'''
_UpperCamelCase : List[str] = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Any=None , _SCREAMING_SNAKE_CASE: List[Any]=None , **_SCREAMING_SNAKE_CASE: List[str] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = kwargs.pop("feature_extractor" )
UpperCamelCase_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __call__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[Any]=None , _SCREAMING_SNAKE_CASE: Optional[int]=None , _SCREAMING_SNAKE_CASE: str=None , **_SCREAMING_SNAKE_CASE: Tuple ) -> str:
"""simple docstring"""
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
UpperCamelCase_ = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if images is not None:
UpperCamelCase_ = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
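        # when both modalities are given, the image tensors are merged into the text encoding below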
if text is not None and images is not None:
UpperCamelCase_ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_SCREAMING_SNAKE_CASE ) , tensor_type=_SCREAMING_SNAKE_CASE )
def lowercase ( self: int , *_SCREAMING_SNAKE_CASE: List[str] , **_SCREAMING_SNAKE_CASE: int ) -> str:
"""simple docstring"""
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowercase ( self: str , *_SCREAMING_SNAKE_CASE: List[str] , **_SCREAMING_SNAKE_CASE: Dict ) -> Tuple:
"""simple docstring"""
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def lowercase ( self: Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.tokenizer.model_input_names
UpperCamelCase_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 328 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
@register_to_config
def __init__( self: Any , _SCREAMING_SNAKE_CASE: int = 768 , ) -> Tuple:
"""simple docstring"""
super().__init__()
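        # learnable per-dimension mean/std used to (de)normalize image embeddings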
UpperCamelCase_ = nn.Parameter(torch.zeros(1 , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = nn.Parameter(torch.ones(1 , _SCREAMING_SNAKE_CASE ) )
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[Union[str, torch.device]] = None , _SCREAMING_SNAKE_CASE: Optional[torch.dtype] = None , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = nn.Parameter(self.mean.to(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = nn.Parameter(self.std.to(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) )
return self
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Dict ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = (embeds * self.std) + self.mean
return embeds
| 328 | 1 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
def __lowerCamelCase ( A__ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = R'\w+[.]\d+'
UpperCamelCase = re.findall(__snake_case , __snake_case )
for pat in pats:
UpperCamelCase = key.replace(__snake_case , '_'.join(pat.split('.' ) ) )
return key
def __lowerCamelCase ( A__ , A__ , A__ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
UpperCamelCase = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
UpperCamelCase = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
UpperCamelCase = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
UpperCamelCase = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
UpperCamelCase = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __lowerCamelCase ( A__ , A__ , A__=42 ) -> List[str]:
"""simple docstring"""
UpperCamelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
UpperCamelCase = flax_model.init_weights(PRNGKey(__snake_case ) )
UpperCamelCase = flatten_dict(__snake_case )
UpperCamelCase = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase = rename_key(__snake_case )
UpperCamelCase = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
UpperCamelCase , UpperCamelCase = rename_key_and_reshape_tensor(__snake_case , __snake_case , __snake_case )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
UpperCamelCase = jnp.asarray(__snake_case )
return unflatten_dict(__snake_case )
| 28 |
"""simple docstring"""
from collections import defaultdict
class _UpperCAmelCase:
def __init__( self , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
_UpperCamelCase = [
[-1 for i in range(total + 1)] for j in range(2 ** len(__a))
]
_UpperCamelCase = defaultdict(__a) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
_UpperCamelCase = (1 << len(__a)) - 1
def UpperCAmelCase ( self , __a , __a) -> Dict:
'''simple docstring'''
        # if mask == self.final_mask, every person has been assigned a task; return 1
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't include this task in the arrangement
_UpperCamelCase = self.count_ways_until(__a , task_no + 1)
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1)
# save the value.
_UpperCamelCase = total_ways_util
return self.dp[mask][task_no]
def UpperCAmelCase ( self , __a) -> int:
'''simple docstring'''
# Store the list of persons for each task
for i in range(len(__a)):
for j in task_performed[i]:
self.task[j].append(__a)
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1)
if __name__ == "__main__":
_a = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
_a = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 194 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase__ = logging.get_logger(__name__)
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
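    # pairwise squared distances via the expansion ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2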
UpperCAmelCase : Optional[Any] = b.T
UpperCAmelCase : Optional[int] = np.sum(np.square(UpperCAmelCase_ ) , axis=1 )
UpperCAmelCase : List[Any] = np.sum(np.square(UpperCAmelCase_ ) , axis=0 )
UpperCAmelCase : List[str] = np.matmul(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : Union[str, Any] = aa[:, None] - 2 * ab + ba[None, :]
return d
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
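    # map every pixel to the index of its nearest color cluster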
UpperCAmelCase : int = x.reshape(-1 , 3 )
UpperCAmelCase : Optional[int] = squared_euclidean_distance(UpperCAmelCase_ , UpperCAmelCase_ )
return np.argmin(UpperCAmelCase_ , axis=1 )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = ["""pixel_values"""]
def __init__( self : List[Any] , lowercase_ : Optional[Union[List[List[int]], np.ndarray]] = None , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : bool = True , **lowercase_ : Optional[Any] , ) -> None:
super().__init__(**lowercase_ )
UpperCAmelCase : Any = size if size is not None else {'height': 256, 'width': 256}
UpperCAmelCase : List[Any] = get_size_dict(lowercase_ )
UpperCAmelCase : str = np.array(lowercase_ ) if clusters is not None else None
UpperCAmelCase : Any = do_resize
UpperCAmelCase : List[Any] = size
UpperCAmelCase : Any = resample
UpperCAmelCase : Dict = do_normalize
UpperCAmelCase : List[Any] = do_color_quantize
def UpperCAmelCase_ ( self : int , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Any , ) -> np.ndarray:
UpperCAmelCase : Dict = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
return resize(
lowercase_ , size=(size['height'], size['width']) , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : np.ndarray , lowercase_ : Optional[Union[str, ChannelDimension]] = None , ) -> np.ndarray:
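        # scale pixel values from [0, 255] to [-1, 1], the range ImageGPT expects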
UpperCAmelCase : int = rescale(image=lowercase_ , scale=1 / 127.5 , data_format=lowercase_ )
UpperCAmelCase : Dict = image - 1
return image
def UpperCAmelCase_ ( self : str , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[Union[List[List[int]], np.ndarray]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **lowercase_ : List[str] , ) -> PIL.Image.Image:
UpperCAmelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase : Optional[Any] = size if size is not None else self.size
UpperCAmelCase : Optional[int] = get_size_dict(lowercase_ )
UpperCAmelCase : Any = resample if resample is not None else self.resample
UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase : str = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
UpperCAmelCase : Optional[int] = clusters if clusters is not None else self.clusters
UpperCAmelCase : List[str] = np.array(lowercase_ )
UpperCAmelCase : int = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
UpperCAmelCase : Dict = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
UpperCAmelCase : List[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_normalize:
UpperCAmelCase : Tuple = [self.normalize(image=lowercase_ ) for image in images]
if do_color_quantize:
UpperCAmelCase : List[str] = [to_channel_dimension_format(lowercase_ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
UpperCAmelCase : int = np.array(lowercase_ )
UpperCAmelCase : str = color_quantize(lowercase_ , lowercase_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
UpperCAmelCase : Optional[int] = images.shape[0]
UpperCAmelCase : Union[str, Any] = images.reshape(lowercase_ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
UpperCAmelCase : int = list(lowercase_ )
else:
UpperCAmelCase : Dict = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
UpperCAmelCase : Any = {'input_ids': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
| 368 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class A_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Any , lowercase_ : str , lowercase_ : Union[str, Any]=7 , lowercase_ : Union[str, Any]=3 , lowercase_ : int=30 , lowercase_ : Tuple=400 , lowercase_ : Tuple=True , lowercase_ : Optional[int]=None , lowercase_ : List[str]=0.9 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=True , lowercase_ : int=[0.5, 0.5, 0.5] , lowercase_ : List[str]=[0.5, 0.5, 0.5] , ) -> Tuple:
UpperCAmelCase : Optional[int] = size if size is not None else {'shortest_edge': 30}
UpperCAmelCase : int = crop_size if crop_size is not None else {'height': 30, 'width': 30}
UpperCAmelCase : Tuple = parent
UpperCAmelCase : Optional[Any] = batch_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : int = min_resolution
UpperCAmelCase : Optional[int] = max_resolution
UpperCAmelCase : str = do_resize_and_center_crop
UpperCAmelCase : int = size
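        # crop_pct follows the timm convention: resize the shortest edge to size / crop_pct, then center-crop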
UpperCAmelCase : Dict = crop_pct
UpperCAmelCase : Union[str, Any] = crop_size
UpperCAmelCase : Optional[int] = do_normalize
UpperCAmelCase : Optional[Any] = image_mean
UpperCAmelCase : Optional[Any] = image_std
def UpperCAmelCase_ ( self : str ) -> int:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = PoolFormerImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self : Dict ) -> str:
UpperCAmelCase : Any = PoolFormerImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self : Tuple ) -> str:
UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(lowercase_ , 'size' ) )
self.assertTrue(hasattr(lowercase_ , 'crop_pct' ) )
self.assertTrue(hasattr(lowercase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase_ , 'image_mean' ) )
self.assertTrue(hasattr(lowercase_ , 'image_std' ) )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} )
UpperCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
pass
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
# Initialize image_processing
UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
UpperCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCAmelCase : str = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
# Initialize image_processing
UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
UpperCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCAmelCase : Optional[Any] = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase_ ( self : str ) -> Dict:
# Initialize image_processing
UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
UpperCAmelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCAmelCase : Optional[int] = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 280 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __A ( _a ):
"""simple docstring"""
__lowerCAmelCase = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 81 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase_ : Union[str, Any] = 16
UpperCAmelCase_ : int = 32
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Accelerator , __magic_name__ : int = 16 , __magic_name__ : str = "bert-base-cased" ) -> Dict:
"""simple docstring"""
UpperCamelCase :List[str] = AutoTokenizer.from_pretrained(__magic_name__ )
UpperCamelCase :Union[str, Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__magic_name__ : Tuple ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase :List[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__magic_name__ , max_length=__magic_name__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCamelCase :List[Any] = datasets.map(
__magic_name__ , batched=__magic_name__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=__magic_name__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase :Optional[Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__magic_name__ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__magic_name__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(__magic_name__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
UpperCamelCase :List[str] = DataLoader(
tokenized_datasets["""train"""] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
UpperCamelCase :List[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
return train_dataloader, eval_dataloader
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict , __magic_name__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase :Optional[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase :Union[str, Any] = config["""lr"""]
UpperCamelCase :List[str] = int(config["""num_epochs"""] )
UpperCamelCase :str = int(config["""seed"""] )
UpperCamelCase :Dict = int(config["""batch_size"""] )
UpperCamelCase :Union[str, Any] = args.model_name_or_path
set_seed(__magic_name__ )
UpperCamelCase , UpperCamelCase :Dict = get_dataloaders(__magic_name__ , __magic_name__ , __magic_name__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase :List[str] = AutoModelForSequenceClassification.from_pretrained(__magic_name__ , return_dict=__magic_name__ )
# Instantiate optimizer
UpperCamelCase :Union[str, Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
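    # DummyOptim is a placeholder that lets DeepSpeed instantiate the real optimizer from its own config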
UpperCamelCase :Optional[Any] = optimizer_cls(params=model.parameters() , lr=__magic_name__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCamelCase :Any = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
UpperCamelCase :Any = 1
UpperCamelCase :Dict = (len(__magic_name__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCamelCase :List[Any] = get_linear_schedule_with_warmup(
optimizer=__magic_name__ , num_warmup_steps=0 , num_training_steps=__magic_name__ , )
else:
UpperCamelCase :Any = DummyScheduler(__magic_name__ , total_num_steps=__magic_name__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :str = accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# We need to keep track of how many total steps we have iterated over
UpperCamelCase :int = 0
    # We also need to keep track of the starting epoch so files are named properly
UpperCamelCase :Tuple = 0
# Now we train the model
UpperCamelCase :Any = evaluate.load("""glue""" , """mrpc""" )
UpperCamelCase :Tuple = 0
UpperCamelCase :List[Any] = {}
for epoch in range(__magic_name__ , __magic_name__ ):
model.train()
for step, batch in enumerate(__magic_name__ ):
UpperCamelCase :List[str] = model(**__magic_name__ )
UpperCamelCase :Dict = outputs.loss
UpperCamelCase :Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(__magic_name__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
UpperCamelCase :str = 0
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase :Optional[int] = model(**__magic_name__ )
UpperCamelCase :List[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCamelCase , UpperCamelCase :Optional[int] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
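                # on the last step, drop the padded duplicates added by the distributed sampler so each sample is counted once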
if step == len(__magic_name__ ) - 1:
UpperCamelCase :Dict = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCamelCase :List[str] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__magic_name__ , references=__magic_name__ , )
UpperCamelCase :List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __magic_name__ )
UpperCamelCase :Dict = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
UpperCamelCase :str = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
json.dump(__magic_name__ , __magic_name__ )
def SCREAMING_SNAKE_CASE_ ( ) -> Tuple:
"""simple docstring"""
    UpperCamelCase :List[str] = argparse.ArgumentParser(description="""Simple example of a training script that checks model performance (GLUE/MRPC accuracy) against an optional lower bound.""" )
parser.add_argument(
"""--model_name_or_path""" , type=__magic_name__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=__magic_name__ , )
parser.add_argument(
"""--output_dir""" , type=__magic_name__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--performance_lower_bound""" , type=__magic_name__ , default=__magic_name__ , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , )
parser.add_argument(
"""--num_epochs""" , type=__magic_name__ , default=3 , help="""Number of train epochs.""" , )
UpperCamelCase :str = parser.parse_args()
UpperCamelCase :Any = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
main()
| 38 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Tuple = logging.get_logger(__name__)
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : List[Any]=False ):
'''simple docstring'''
_a = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('''head''' ):
_a = "segformer.encoder." + key
if key.startswith('''backbone''' ):
_a = key.replace('''backbone''' , '''segformer.encoder''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_a = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
_a = key.replace(f'patch_embed{idx}' , f'patch_embeddings.{int(_lowerCAmelCase )-1}' )
if "norm" in key:
_a = key.replace('''norm''' , '''layer_norm''' )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_a = key[key.find('''segformer.encoder.layer_norm''' ) + len('''segformer.encoder.layer_norm''' )]
_a = key.replace(f'layer_norm{idx}' , f'layer_norm.{int(_lowerCAmelCase )-1}' )
if "layer_norm1" in key:
_a = key.replace('''layer_norm1''' , '''layer_norm_1''' )
if "layer_norm2" in key:
_a = key.replace('''layer_norm2''' , '''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
_a = key[key.find('''block''' ) + len('''block''' )]
_a = key.replace(f'block{idx}' , f'block.{int(_lowerCAmelCase )-1}' )
if "attn.q" in key:
_a = key.replace('''attn.q''' , '''attention.self.query''' )
if "attn.proj" in key:
_a = key.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in key:
_a = key.replace('''attn''' , '''attention.self''' )
if "fc1" in key:
_a = key.replace('''fc1''' , '''dense1''' )
if "fc2" in key:
_a = key.replace('''fc2''' , '''dense2''' )
if "linear_pred" in key:
_a = key.replace('''linear_pred''' , '''classifier''' )
if "linear_fuse" in key:
_a = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
_a = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_a = key[key.find('''linear_c''' ) + len('''linear_c''' )]
_a = key.replace(f'linear_c{idx}' , f'linear_c.{int(_lowerCAmelCase )-1}' )
if key.startswith('''head''' ):
_a = key.replace('''head''' , '''classifier''' )
_a = value
return new_state_dict
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : Any ):
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_a = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.weight' )
_a = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
_a = kv_weight[
: config.hidden_sizes[i], :
]
_a = kv_bias[: config.hidden_sizes[i]]
_a = kv_weight[
config.hidden_sizes[i] :, :
]
_a = kv_bias[
config.hidden_sizes[i] :
]
def snake_case_ ():
'''simple docstring'''
_a = "http://images.cocodataset.org/val2017/000000039769.jpg"
_a = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return image
@torch.no_grad()
def snake_case_ (UpperCamelCase : List[str] , UpperCamelCase : int , UpperCamelCase : str ):
'''simple docstring'''
_a = SegformerConfig()
_a = False
# set attributes based on model_name
_a = "huggingface/label-files"
if "segformer" in model_name:
_a = model_name[len('''segformer.''' ) : len('''segformer.''' ) + 2]
if "ade" in model_name:
_a = 150
_a = "ade20k-id2label.json"
_a = (1, 150, 128, 128)
elif "city" in model_name:
_a = 19
_a = "cityscapes-id2label.json"
_a = (1, 19, 128, 128)
else:
raise ValueError(f'Model {model_name} not supported' )
elif "mit" in model_name:
_a = True
_a = model_name[4:6]
_a = 1000
_a = "imagenet-1k-id2label.json"
_a = (1, 1000)
else:
raise ValueError(f'Model {model_name} not supported' )
# set config attributes
_a = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_a = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
_a = [64, 128, 320, 512]
_a = 256
elif size == "b2":
_a = [64, 128, 320, 512]
_a = 768
_a = [3, 4, 6, 3]
elif size == "b3":
_a = [64, 128, 320, 512]
_a = 768
_a = [3, 4, 18, 3]
elif size == "b4":
_a = [64, 128, 320, 512]
_a = 768
_a = [3, 8, 27, 3]
elif size == "b5":
_a = [64, 128, 320, 512]
_a = 768
_a = [3, 6, 40, 3]
else:
raise ValueError(f'Size {size} not supported' )
# load image processor (only resize + normalize)
_a = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_lowerCAmelCase , align=_lowerCAmelCase , do_random_crop=_lowerCAmelCase )
# prepare image
_a = prepare_img()
_a = image_processor(images=_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
logger.info(f'Converting model {model_name}...' )
# load original state dict
if encoder_only:
_a = torch.load(_lowerCAmelCase , map_location=torch.device('''cpu''' ) )
else:
_a = torch.load(_lowerCAmelCase , map_location=torch.device('''cpu''' ) )["state_dict"]
# rename keys
_a = rename_keys(_lowerCAmelCase , encoder_only=_lowerCAmelCase )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(_lowerCAmelCase , _lowerCAmelCase )
# create HuggingFace model and load state dict
if encoder_only:
_a = False
_a = SegformerForImageClassification(_lowerCAmelCase )
else:
_a = SegformerForSemanticSegmentation(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
# forward pass
_a = model(_lowerCAmelCase )
_a = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
_a = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
_a = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
_a = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
_a = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
_a = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
_a = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
_a = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
_a = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
_a = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
_a = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
_a = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
_a = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
_a = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
_a = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
_a = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
_a = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , _lowerCAmelCase , atol=1e-2 )
# finally, save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_snake_case : int = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='segformer.b0.512x512.ade.160k',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_snake_case : int = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 354 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 179 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
A_ = logging.get_logger(__name__)
class _snake_case ( _a ):
def __init__( self : Optional[int] ,*SCREAMING_SNAKE_CASE__ : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : str ):
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." ,SCREAMING_SNAKE_CASE__ ,)
super().__init__(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
| 139 |
'''simple docstring'''
from __future__ import annotations
def A_ ( snake_case , snake_case , snake_case , ):
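    # exactly one of the three quantities must be 0; it is treated as the unknown in tau = F / A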
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 139 | 1 |
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _lowerCamelCase ( lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
if isinstance(lowerCamelCase_ , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def _UpperCamelCase ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Dict = np.abs((a - b) ).max()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , F'''Difference between torch and flax is {diff} (>= {tol}).''' )
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , **snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = FlaxVisionTextDualEncoderModel(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , **snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = {'vision_model': vision_model, 'text_model': text_model}
UpperCAmelCase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , **snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = {'vision_model': vision_model, 'text_model': text_model}
UpperCAmelCase_ : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = FlaxVisionTextDualEncoderModel.from_pretrained(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = after_output[0]
UpperCAmelCase_ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1E-3 )
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , **snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = {'vision_model': vision_model, 'text_model': text_model}
UpperCAmelCase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = model(
input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = output.vision_model_output.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ : int = to_atuple(vision_model.config.image_size )
UpperCAmelCase_ : Dict = to_atuple(vision_model.config.patch_size )
UpperCAmelCase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCAmelCase_ : Optional[Any] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCAmelCase_ : Any = output.text_model_output.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
pt_model.to(__SCREAMING_SNAKE_CASE )
pt_model.eval()
# prepare inputs
UpperCAmelCase_ : Any = inputs_dict
UpperCAmelCase_ : Any = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = pt_model(**__SCREAMING_SNAKE_CASE ).to_tuple()
UpperCAmelCase_ : int = fx_model(**__SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__SCREAMING_SNAKE_CASE , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = FlaxVisionTextDualEncoderModel.from_pretrained(__SCREAMING_SNAKE_CASE , from_pt=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = fx_model_loaded(**__SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__SCREAMING_SNAKE_CASE , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = VisionTextDualEncoderModel.from_pretrained(__SCREAMING_SNAKE_CASE , from_flax=__SCREAMING_SNAKE_CASE )
pt_model_loaded.to(__SCREAMING_SNAKE_CASE )
pt_model_loaded.eval()
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = pt_model_loaded(**__SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__SCREAMING_SNAKE_CASE , pt_output_loaded.numpy() , 4E-2 )
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : int = VisionTextDualEncoderConfig.from_vision_text_configs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = VisionTextDualEncoderModel(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = fx_state
self.check_pt_flax_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = VisionTextDualEncoderModel(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = FlaxVisionTextDualEncoderModel(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = load_flax_weights_in_pytorch_model(__SCREAMING_SNAKE_CASE , fx_model.params )
self.check_pt_flax_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
self.check_save_load(**__SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__SCREAMING_SNAKE_CASE )
@is_pt_flax_cross_test
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : int = self.prepare_config_and_inputs()
UpperCAmelCase_ : Tuple = config_inputs_dict.pop('vision_config' )
UpperCAmelCase_ : int = config_inputs_dict.pop('text_config' )
UpperCAmelCase_ : Any = config_inputs_dict
self.check_equivalence_pt_to_flax(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.check_equivalence_flax_to_pt(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.get_pretrained_model_and_inputs()
UpperCAmelCase_ : str = model_a(**__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = model_a(**__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = after_outputs[0]
UpperCAmelCase_ : Dict = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1E-5 )
@require_flax
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=__SCREAMING_SNAKE_CASE , text_from_pt=__SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ : str = 1_3
UpperCAmelCase_ : str = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCAmelCase_ : Optional[int] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
UpperCAmelCase_ : Dict = random_attention_mask([batch_size, 4] )
UpperCAmelCase_ : Optional[Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _UpperCamelCase ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = FlaxViTModel(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = FlaxBertModel(__SCREAMING_SNAKE_CASE )
return vision_model, text_model
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = FlaxViTModelTester(self )
UpperCAmelCase_ : int = FlaxBertModelTester(self )
UpperCAmelCase_ : Optional[Any] = vit_model_tester.prepare_config_and_inputs()
UpperCAmelCase_ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = vision_config_and_inputs
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-clip',
            'hf-internal-testing/tiny-bert',
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
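# The two test classes above run the same VisionTextDualEncoderMixin suite with
# different vision backbones (a tiny ViT and a tiny CLIP vision tower) paired
# with the same tiny BERT text encoder; only the model classes, checkpoints and
# unimodal testers differ between them.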
@require_flax
@require_vision
class VisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian', logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')

        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = processor(
            text=['una foto di un gatto', 'una foto di un cane'], images=image, padding=True, return_tensors='np'
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
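        # Hypothetical extension (not part of the upstream test): the similarity
        # logits can be mapped to zero-shot classification probabilities with a
        # softmax over the caption axis, e.g.
        #
        #     scores = np.exp(outputs.logits_per_image)
        #     probs = scores / scores.sum(axis=-1, keepdims=True)
        #     # probs[0] ~ how well each Italian caption matches the cat image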
| 352 |
import os
import time
import numpy as np
import onnxruntime as ort
# NOTE: the original environment-variable names were lost in this copy; the
# ones below follow the upstream TensorRT INT8 benchmark this script mirrors,
# so treat them as an assumption rather than a verbatim restoration.
os.environ['ORT_TENSORRT_INT8_ENABLE'] = '1'  # enable INT8 precision
os.environ['ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE'] = '0'  # use TensorRT's calibration table
os.environ['ORT_TENSORRT_ENGINE_CACHE_ENABLE'] = '1'  # cache built engines across runs

sess_opt = ort.SessionOptions()
# disable ONNX Runtime graph optimizations so TensorRT receives the raw graph
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL

print('Create inference session...')
execution_provider = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
sess = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

# dummy all-ones BERT-style inputs: token ids, attention mask, segment ids
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('Warm up phase...')
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print('Start inference...')
start_time = time.time()
max_iters = 2000
predict = {}
for _ in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1000 / max_iters))
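# A possible refinement (an assumption, not in the original script): timing each
# iteration separately exposes tail latency that the average above hides.
#
#     latencies = []
#     for _ in range(max_iters):
#         t0 = time.time()
#         sess.run(
#             None,
#             {
#                 sess.get_inputs()[0].name: input_ids,
#                 sess.get_inputs()[1].name: attention_mask,
#                 sess.get_inputs()[2].name: token_type_ids,
#             },
#             run_options=run_opt,
#         )
#         latencies.append((time.time() - t0) * 1000)
#     print('p50 = {:.3f} ms / p95 = {:.3f} ms'.format(
#         np.percentile(latencies, 50), np.percentile(latencies, 95)))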
| 274 | 0 |