import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Check that the input has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must be of type `str` or `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
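# A minimal, standalone sketch of the xpath construction above, runnable with
# just `beautifulsoup4` installed (no transformers plumbing). The helper name
# `xpath_for` is ours, not part of the library; it mirrors the logic of
# xpath_soup + construct_xpath.
from bs4 import BeautifulSoup


def xpath_for(element):
    tags, subs = [], []
    child = element if element.name else element.parent
    for parent in child.parents:
        siblings = parent.find_all(child.name, recursive=False)
        tags.append(child.name)
        subs.append(0 if len(siblings) == 1 else next(i for i, s in enumerate(siblings, 1) if s is child))
        child = parent
    xpath = ""
    for tag, sub in zip(reversed(tags), reversed(subs)):
        xpath += f"/{tag}" + (f"[{sub}]" if sub else "")
    return xpath


soup = BeautifulSoup("<html><body><p>Hello</p><p>World</p></body></html>", "html.parser")
for node in soup.find_all(string=True):
    print(repr(node), "->", xpath_for(node))
# 'Hello' -> /html/body/p[1]
# 'World' -> /html/body/p[2]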
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) )
class LevitModelTester:
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=64 , _UpperCAmelCase=3 , _UpperCAmelCase=3 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=16 , _UpperCAmelCase=[128, 256, 384] , _UpperCAmelCase=[4, 6, 8] , _UpperCAmelCase=[2, 3, 4] , _UpperCAmelCase=[16, 16, 16] , _UpperCAmelCase=0 , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=2 , ) -> int:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = kernel_size
UpperCamelCase_ = stride
UpperCamelCase_ = padding
UpperCamelCase_ = hidden_sizes
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = depths
UpperCamelCase_ = key_dim
UpperCamelCase_ = drop_path_rate
UpperCamelCase_ = patch_size
UpperCamelCase_ = attention_ratio
UpperCamelCase_ = mlp_ratio
UpperCamelCase_ = initializer_range
UpperCamelCase_ = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = num_labels
UpperCamelCase_ = initializer_range
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase_ = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> str:
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
UpperCamelCase_ = LevitModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase )
UpperCamelCase_ = (self.image_size, self.image_size)
UpperCamelCase_ , UpperCamelCase_ = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase_ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
UpperCamelCase_ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = LevitForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
A_ = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
A_ = False
    def setUp(self) -> None:
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
def _UpperCAmelCase ( self ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> Any:
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> str:
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def _UpperCAmelCase ( self ) -> Dict:
pass
@unittest.skip(reason='Levit does not output attentions' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCamelCase_ = outputs.hidden_states
UpperCamelCase_ = len(self.model_tester.depths ) + 1
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
UpperCamelCase_ = (self.model_tester.image_size, self.model_tester.image_size)
UpperCamelCase_ , UpperCamelCase_ = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase_ = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
UpperCamelCase_ = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Tuple:
UpperCamelCase_ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
if not self.model_tester.is_training:
return
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_UpperCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase_ = False
UpperCamelCase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_UpperCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type["title"]}""" ):
UpperCamelCase_ = problem_type['title']
UpperCamelCase_ = problem_type['num_labels']
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if problem_type["num_labels"] > 1:
UpperCamelCase_ = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
UpperCamelCase_ = inputs['labels'].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_UpperCAmelCase ) as warning_list:
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def _UpperCAmelCase ( self ) -> List[str]:
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = LevitModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCAmelCase ( self ) -> str:
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_UpperCAmelCase )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCamelCase_ = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
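# Sanity check of the shape arithmetic the Levit tests above rely on: with the
# tester defaults (kernel_size=3, stride=2, padding=1), each of the four
# patch-embedding convolutions maps a spatial dim h to floor((h + 2*1 - 3)/2 + 1),
# so the 64x64 test input ends up 4x4 before the transformer stages.
from math import floor

size = 64
for _ in range(4):
    size = floor((size + 2 * 1 - 3) / 2 + 1)
print(size)  # 4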
import os


def solution():
    """Project Euler 13: first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
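# num.txt itself is not included here; as a self-contained illustration of the
# same idea (Python's arbitrary-precision integers make "first ten digits of a
# huge sum" a one-liner), the first two numbers from the problem statement can
# be supplied inline:
numbers = [
    "37107287533902102798797998220837590246510135740250",
    "46376937677490009712648124896970078050417018260538",
]
print(str(sum(int(n) for n in numbers))[:10])  # first ten digits of the sum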
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
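# Minimal usage sketch for the two configs above: the composite GitConfig owns
# a nested GitVisionConfig, and to_dict() re-serializes it. Values shown are
# traced from the default signatures above.
#
# config = GitConfig(vision_config={"hidden_size": 512})
# config.vision_config.hidden_size                  # 512
# config.to_dict()["vision_config"]["hidden_size"]  # 512
# config.to_dict()["model_type"]                    # "git"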
import warnings
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)


class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids

        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
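# Standalone sketch of the byte-level scheme above: every UTF-8 byte becomes a
# token id shifted past the three special ids (pad=0, eos=1, unk=2). Runnable
# without transformers; the offset of 3 matches _num_special_tokens above, and
# the helper names are ours, not library API.
NUM_SPECIAL_TOKENS = 3  # pad, eos, unk


def byte_encode(text):
    return [b + NUM_SPECIAL_TOKENS for b in text.encode("utf-8")]


def byte_decode(ids):
    return bytes(i - NUM_SPECIAL_TOKENS for i in ids).decode("utf-8", errors="ignore")


assert byte_decode(byte_encode("héllo")) == "héllo"
print(byte_encode("hi"))  # [107, 108] -- ord('h') = 104 and ord('i') = 105, each + 3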
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # a single random CHW uint8 array converted to a PIL image
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCamelCase_ = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
UpperCamelCase_ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='np' )
UpperCamelCase_ = processor(images=_UpperCAmelCase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = 'lower newer'
UpperCamelCase_ = processor(text=_UpperCAmelCase )
UpperCamelCase_ = tokenizer(_UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = 'lower newer'
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(_UpperCAmelCase ):
processor()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_ = processor.batch_decode(_UpperCAmelCase )
UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = 'lower newer'
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
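# Usage sketch for the processor under test (the transformers
# VisionTextDualEncoderProcessor): one object wraps a tokenizer and an image
# processor so text and images are prepared in a single call. The variable
# names below are illustrative only.
#
# processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
# batch = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
# batch.keys()  # input_ids, token_type_ids, attention_mask, pixel_values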
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding,
            truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
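# Usage sketch (the checkpoint name below is illustrative; `document_image` is
# a PIL image supplied by the caller): with apply_ocr enabled in the image
# processor, words and boxes come from OCR, so only the image is required.
#
# processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
# encoding = processor(images=document_image, return_tensors="pt")
# encoding.keys()  # input_ids, bbox, attention_mask, image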
def valid_coloring(neighbours: list, colored_vertices: list, color: int) -> bool:
    """Check that no already-colored neighbour of the current vertex uses `color`."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list, max_colors: int, colored_vertices: list, index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list, max_colors: int) -> list:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
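# Example run of the backtracking colorer above; the adjacency matrix below is
# our own, chosen to show both outcomes (the expected outputs were traced by hand).
if __name__ == "__main__":
    # 4-vertex graph containing a triangle (0-1-2): three colors suffice,
    # two do not.
    graph = [
        [0, 1, 1, 0],
        [1, 0, 1, 0],
        [1, 1, 0, 1],
        [0, 0, 1, 0],
    ]
    print(color(graph, 3))  # [0, 1, 2, 0]
    print(color(graph, 2))  # [] -- the triangle is not 2-colorable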
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: Optional[ImageInput] = None, text=None, add_special_tokens=True, padding=False, truncation=False, max_length=None, stride=0, pad_to_multiple_of=None, return_image_mask=None, return_codebook_pixels=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors, **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Project Euler 99: find the line whose base**exponent is largest,
    comparing exponent * log10(base) instead of the huge powers themselves."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
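# Why the log10 comparison works: log10 is strictly increasing, so
# a1**x1 > a2**x2  <=>  x1 * log10(a1) > x2 * log10(a2), which avoids ever
# materializing the astronomically large powers in base_exp.txt.
# Tiny check with small numbers:
# >>> (2**11 > 3**6) == (11 * log10(2) > 6 * log10(3))
# True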
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
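# Note: the _LazyModule indirection above defers the heavy submodule imports
# until an attribute is first accessed (e.g. `from transformers.models.herbert
# import HerbertTokenizer`), while the TYPE_CHECKING branch keeps the symbols
# visible to static type checkers and IDEs.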
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=32 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=4 , _UpperCAmelCase=[0, 1, 2, 3] , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=3 , _UpperCAmelCase=[1, 384, 24, 24] , _UpperCAmelCase=True , _UpperCAmelCase=None , ) -> Optional[Any]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = backbone_out_indices
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_labels
UpperCamelCase_ = backbone_featmap_shape
UpperCamelCase_ = scope
UpperCamelCase_ = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase_ = (image_size // patch_size) ** 2
UpperCamelCase_ = num_patches + 1
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase_ = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [96, 192, 384, 768],
'num_groups': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_UpperCAmelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
UpperCamelCase_ = DPTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = DPTForDepthEstimation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = DPTForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
A_ = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
    def setUp(self) -> None:
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
def _UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='DPT does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> str:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = True
if model_class in get_values(_UpperCAmelCase ):
continue
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = False
UpperCamelCase_ = True
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(config=_UpperCAmelCase )
# Skip the check for the backbone
UpperCamelCase_ = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase_ = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> Any:
pass
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase_ = DPTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = 'add'
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = DPTForDepthEstimation(_UpperCAmelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' )
UpperCamelCase_ = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(_UpperCAmelCase )
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
UpperCamelCase_ = outputs.predicted_depth
# verify the predicted depth
UpperCamelCase_ = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , _UpperCAmelCase )
UpperCamelCase_ = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , _UpperCAmelCase , atol=1e-4 ) )
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Map deprecated `no_*` flags onto their positive counterparts.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 701 |
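# Rod-cutting problem solved three ways below: naive exponential recursion, memoized top-down recursion, and bottom-up dynamic programming.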
def _snake_case (__lowercase , __lowercase):
_enforce_args(__lowercase , __lowercase)
if n == 0:
return 0
UpperCamelCase_ = float('-inf')
for i in range(1 , n + 1):
UpperCamelCase_ = max(
__lowercase , prices[i - 1] + naive_cut_rod_recursive(n - i , __lowercase))
return max_revue
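# Top-down entry point: allocate the memo table (initialized to -inf) and recurse.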
def _snake_case (__lowercase , __lowercase):
_enforce_args(__lowercase , __lowercase)
UpperCamelCase_ = [float('-inf') for _ in range(n + 1)]
return _top_down_cut_rod_recursive(__lowercase , __lowercase , __lowercase)
def _snake_case (__lowercase , __lowercase , __lowercase):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
UpperCamelCase_ = float('-inf')
for i in range(1 , n + 1):
UpperCamelCase_ = max(
__lowercase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , __lowercase , __lowercase) , )
UpperCamelCase_ = max_revenue
return max_rev[n]
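# Bottom-up DP: max_rev[i] holds the best revenue obtainable from a rod of length i.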
def _snake_case (__lowercase , __lowercase):
_enforce_args(__lowercase , __lowercase)
# length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
# length 0.
UpperCamelCase_ = [float('-inf') for _ in range(n + 1)]
UpperCamelCase_ = 0
for i in range(1 , n + 1):
UpperCamelCase_ = max_rev[i]
for j in range(1 , i + 1):
UpperCamelCase_ = max(__lowercase , prices[j - 1] + max_rev[i - j])
UpperCamelCase_ = max_revenue_i
return max_rev[n]
def _snake_case (__lowercase , __lowercase):
if n < 0:
UpperCamelCase_ = f"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(__lowercase)
if n > len(__lowercase):
UpperCamelCase_ = (
'Each integral piece of rod must have a corresponding price. '
f"""Got n = {n} but length of prices = {len(__lowercase)}"""
)
raise ValueError(__lowercase)
def _snake_case ():
UpperCamelCase_ = [6, 10, 12, 15, 20, 23]
UpperCamelCase_ = len(__lowercase)
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
UpperCamelCase_ = 36
UpperCamelCase_ = top_down_cut_rod(__lowercase , __lowercase)
UpperCamelCase_ = bottom_up_cut_rod(__lowercase , __lowercase)
UpperCamelCase_ = naive_cut_rod_recursive(__lowercase , __lowercase)
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 618 | 0 |
import os
from math import logaa
def _snake_case (__lowercase = "base_exp.txt"):
UpperCamelCase_ = 0
UpperCamelCase_ = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(__lowercase) , __lowercase))):
UpperCamelCase_ , UpperCamelCase_ = list(map(__lowercase , line.split(',')))
if x * logaa(__lowercase) > largest:
UpperCamelCase_ = x * logaa(__lowercase)
UpperCamelCase_ = i + 1
return result
if __name__ == "__main__":
print(solution())
| 702 |
snake_case__ : List[Any] = """Tobias Carryer"""
from time import time
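# Linear congruential generator (LCG): x_{n+1} = (multiplier * x_n + increment) % modulo.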
class _a :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=int(time() ) ) -> Tuple: # noqa: B008
UpperCamelCase_ = multiplier
UpperCamelCase_ = increment
UpperCamelCase_ = modulo
UpperCamelCase_ = seed
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
snake_case__ : List[Any] = LinearCongruentialGenerator(1_6_6_4_5_2_5, 1_0_1_3_9_0_4_2_2_3, 2 << 3_1)
while True:
print(lcg.next_number())
| 618 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ : Tuple = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Tuple = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Optional[int] = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : int = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
snake_case__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 703 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
snake_case__ : Optional[int] = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'])
def _snake_case (__lowercase , __lowercase):
inspect_dataset(__lowercase , __lowercase)
UpperCamelCase_ = path + '.py'
assert script_name in os.listdir(__lowercase)
assert "__pycache__" not in os.listdir(__lowercase)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning')
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
@pytest.mark.parametrize('path' , ['accuracy'])
def _snake_case (__lowercase , __lowercase):
inspect_metric(__lowercase , __lowercase)
UpperCamelCase_ = path + '.py'
assert script_name in os.listdir(__lowercase)
assert "__pycache__" not in os.listdir(__lowercase)
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def _snake_case (__lowercase , __lowercase , __lowercase):
UpperCamelCase_ = get_dataset_config_info(__lowercase , config_name=__lowercase)
assert info.config_name == config_name
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def _snake_case (__lowercase , __lowercase , __lowercase):
with pytest.raises(__lowercase):
get_dataset_config_info(__lowercase , config_name=__lowercase)
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def _snake_case (__lowercase , __lowercase):
UpperCamelCase_ = get_dataset_config_names(__lowercase)
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def _snake_case (__lowercase , __lowercase , __lowercase):
UpperCamelCase_ = get_dataset_infos(__lowercase)
assert list(infos.keys()) == expected_configs
UpperCamelCase_ = expected_configs[0]
assert expected_config in infos
UpperCamelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def _snake_case (__lowercase , __lowercase , __lowercase):
UpperCamelCase_ = get_dataset_infos(__lowercase)
assert expected_config in infos
UpperCamelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def _snake_case (__lowercase , __lowercase , __lowercase):
with pytest.raises(__lowercase):
get_dataset_split_names(__lowercase , config_name=__lowercase)
| 618 | 0 |
def _snake_case (__lowercase , __lowercase):
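# Kinetic energy: KE = 0.5 * m * v**2; abs() makes the sign of the velocity irrelevant.
# e.g. a 10 kg body moving at 5 m/s carries 0.5 * 10 * 5 * 5 = 125.0 J.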
if mass < 0:
raise ValueError('The mass of a body cannot be negative')
return 0.5 * mass * abs(__lowercase) * abs(__lowercase)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 704 |
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = parent
def _UpperCAmelCase ( self ) -> Optional[Any]:
return {}
def _snake_case ():
UpperCamelCase_ = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
UpperCamelCase_ = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
return [html_string_a, html_string_a]
@require_bsa
class _a ( UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = MarkupLMFeatureExtractor if is_bsa_available() else None
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = MarkupLMFeatureExtractionTester(self )
@property
def _UpperCAmelCase ( self ) -> Any:
return self.feature_extract_tester.prepare_feat_extract_dict()
def _UpperCAmelCase ( self ) -> Dict:
# Initialize feature_extractor
UpperCamelCase_ = self.feature_extraction_class()
# Test not batched input
UpperCamelCase_ = get_html_strings()[0]
UpperCamelCase_ = feature_extractor(_UpperCAmelCase )
# fmt: off
UpperCamelCase_ = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
UpperCamelCase_ = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
self.assertEqual(encoding.nodes , _UpperCAmelCase )
self.assertEqual(encoding.xpaths , _UpperCAmelCase )
# Test batched
UpperCamelCase_ = get_html_strings()
UpperCamelCase_ = feature_extractor(_UpperCAmelCase )
# fmt: off
UpperCamelCase_ = expected_nodes + [['My First Heading', 'My first paragraph.']]
UpperCamelCase_ = expected_xpaths + [['/html/body/h1', '/html/body/p']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , _UpperCAmelCase )
self.assertEqual(encoding.xpaths , _UpperCAmelCase )
| 618 | 0 |
def _snake_case (__lowercase):
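# Selection sort: for each index, find the minimum of the remaining suffix and swap it into place (O(n**2) comparisons, in-place, not stable).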
UpperCamelCase_ = len(__lowercase)
for i in range(length - 1):
UpperCamelCase_ = i
for k in range(i + 1 , __lowercase):
if collection[k] < collection[least]:
UpperCamelCase_ = k
if least != i:
UpperCamelCase_ , UpperCamelCase_ = (collection[i], collection[least])
return collection
if __name__ == "__main__":
snake_case__ : Any = input("""Enter numbers separated by a comma:\n""").strip()
snake_case__ : Optional[Any] = [int(item) for item in user_input.split(""",""")]
print(selection_sort(unsorted))
| 705 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : str = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = """layoutlmv3"""
def __init__( self , _UpperCAmelCase=50265 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , _UpperCAmelCase=1024 , _UpperCAmelCase=128 , _UpperCAmelCase=128 , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=128 , _UpperCAmelCase=64 , _UpperCAmelCase=256 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=224 , _UpperCAmelCase=3 , _UpperCAmelCase=16 , _UpperCAmelCase=None , **_UpperCAmelCase , ) -> Tuple:
super().__init__(
vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
UpperCamelCase_ = max_ad_position_embeddings
UpperCamelCase_ = coordinate_size
UpperCamelCase_ = shape_size
UpperCamelCase_ = has_relative_attention_bias
UpperCamelCase_ = rel_pos_bins
UpperCamelCase_ = max_rel_pos
UpperCamelCase_ = has_spatial_attention_bias
UpperCamelCase_ = rel_ad_pos_bins
UpperCamelCase_ = max_rel_ad_pos
UpperCamelCase_ = text_embed
UpperCamelCase_ = visual_embed
UpperCamelCase_ = input_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = patch_size
UpperCamelCase_ = classifier_dropout
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = version.parse("""1.12""" )
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def _UpperCAmelCase ( self ) -> float:
return 1e-5
@property
def _UpperCAmelCase ( self ) -> int:
return 12
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = 3 , _UpperCAmelCase = 40 , _UpperCAmelCase = 40 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase_ = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase_ = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
UpperCamelCase_ = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase_ = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCamelCase_ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCamelCase_ = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs
| 618 | 0 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
UpperCamelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
UpperCamelCase_ = 'xvjiarui/stable-diffusion-2-inpainting'
UpperCamelCase_ , UpperCamelCase_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(_UpperCAmelCase , safety_checker=_UpperCAmelCase )
UpperCamelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
UpperCamelCase_ = jax.random.PRNGKey(0 )
UpperCamelCase_ = 50
UpperCamelCase_ = jax.device_count()
UpperCamelCase_ = num_samples * [prompt]
UpperCamelCase_ = num_samples * [init_image]
UpperCamelCase_ = num_samples * [mask_image]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = pipeline.prepare_inputs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# shard inputs and rng
UpperCamelCase_ = replicate(_UpperCAmelCase )
UpperCamelCase_ = jax.random.split(_UpperCAmelCase , jax.device_count() )
UpperCamelCase_ = shard(_UpperCAmelCase )
UpperCamelCase_ = shard(_UpperCAmelCase )
UpperCamelCase_ = shard(_UpperCAmelCase )
UpperCamelCase_ = pipeline(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase )
UpperCamelCase_ = output.images.reshape(_UpperCAmelCase , 512 , 512 , 3 )
UpperCamelCase_ = images[0, 253:256, 253:256, -1]
UpperCamelCase_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase_ = jnp.array(
[0.3_6_1_1_3_0_7, 0.3_7_6_4_9_7_3_6, 0.3_7_5_7_4_0_8, 0.3_8_2_1_3_9_5_3, 0.3_9_2_9_5_1_6_7, 0.3_8_4_1_6_3_1, 0.4_1_5_5_4_9_7_8, 0.4_1_3_7_4_7_5, 0.4_2_1_7_0_8_4] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 706 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=30 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=None , ) -> Optional[Any]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase_ = (image_size // patch_size) ** 2
UpperCamelCase_ = num_patches + 1
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> int:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
UpperCamelCase_ = ViTMSNModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
UpperCamelCase_ = self.type_sequence_label_size
UpperCamelCase_ = ViTMSNForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
print(f"""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print(f"""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase_ = 1
UpperCamelCase_ = ViTMSNForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
A_ = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = ViTMSNModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Dict:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = ViTMSNModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _snake_case ():
UpperCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> List[str]:
torch.manual_seed(2 )
UpperCamelCase_ = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_UpperCAmelCase )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCamelCase_ = torch.tensor([-0.0_8_0_3, -0.4_4_5_4, -0.2_3_7_5] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
| 618 | 0 |
import qiskit
def _snake_case (__lowercase , __lowercase):
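# Build a circuit over the given qubits/bits, flip qubits 0 and 1 with X gates, measure them, and return the counts from 1000 shots on the Aer simulator.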
UpperCamelCase_ = qiskit.Aer.get_backend('aer_simulator')
# Create a Quantum Circuit acting on the q register
UpperCamelCase_ = qiskit.QuantumCircuit(__lowercase , __lowercase)
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0)
circuit.x(1)
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1])
# Execute the circuit on the qasm simulator
UpperCamelCase_ = qiskit.execute(__lowercase , __lowercase , shots=1000)
# Return the histogram data of the results of the experiment.
return job.result().get_counts(__lowercase)
if __name__ == "__main__":
snake_case__ : List[str] = single_qubit_measure(2, 2)
print(f'Total count for various states are: {counts}')
| 707 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
snake_case__ : List[str] = logging.get_logger(__name__)
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> None:
warnings.warn(
'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use VideoMAEImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 618 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
snake_case__ : Tuple = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class _a :
"""simple docstring"""
A_ = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
A_ = field(
default=UpperCAmelCase__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
A_ = field(
default=UpperCAmelCase__ , metadata={"""help""": """The column name of the images in the files."""} )
A_ = field(default=UpperCAmelCase__ , metadata={"""help""": """A folder containing the training data."""} )
A_ = field(default=UpperCAmelCase__ , metadata={"""help""": """A folder containing the validation data."""} )
A_ = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
A_ = field(
default=UpperCAmelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
A_ = field(
default=UpperCAmelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = {}
if self.train_dir is not None:
UpperCamelCase_ = self.train_dir
if self.validation_dir is not None:
UpperCamelCase_ = self.validation_dir
UpperCamelCase_ = data_files if data_files else None
@dataclass
class _a :
"""simple docstring"""
A_ = field(
default=UpperCAmelCase__ , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
A_ = field(
default=UpperCAmelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
A_ = field(
default=UpperCAmelCase__ , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
A_ = field(
default=UpperCAmelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
A_ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
A_ = field(default=UpperCAmelCase__ , metadata={"""help""": """Name or path of preprocessor config."""} )
A_ = field(
default=UpperCAmelCase__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
A_ = field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
A_ = field(
default=UpperCAmelCase__ , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = field(
default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def _snake_case (__lowercase):
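# Data collator: stack the per-example pixel_values into a single batch tensor for the Trainer.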
UpperCamelCase_ = torch.stack([example['pixel_values'] for example in examples])
return {"pixel_values": pixel_values}
def _snake_case ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mae' , __lowercase , __lowercase)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout)] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase_ = training_args.get_process_log_level()
logger.setLevel(__lowercase)
transformers.utils.logging.set_verbosity(__lowercase)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}""")
logger.info(f"""Training/evaluation parameters {training_args}""")
# Detecting last checkpoint.
UpperCamelCase_ = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase_ = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.')
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
# Initialize our dataset.
UpperCamelCase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase_ = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __lowercase) and data_args.train_val_split > 0.0:
UpperCamelCase_ = ds['train'].train_test_split(data_args.train_val_split)
UpperCamelCase_ = split['train']
UpperCamelCase_ = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_ = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCamelCase_ = ViTMAEConfig.from_pretrained(model_args.config_name , **__lowercase)
elif model_args.model_name_or_path:
UpperCamelCase_ = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__lowercase)
else:
UpperCamelCase_ = ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.')
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""")
config.update_from_string(model_args.config_overrides)
logger.info(f"""New config: {config}""")
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
})
# create image processor
if model_args.image_processor_name:
UpperCamelCase_ = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__lowercase)
elif model_args.model_name_or_path:
UpperCamelCase_ = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowercase)
else:
UpperCamelCase_ = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
UpperCamelCase_ = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch')
UpperCamelCase_ = ViTMAEForPreTraining(__lowercase)
if training_args.do_train:
UpperCamelCase_ = ds['train'].column_names
else:
UpperCamelCase_ = ds['validation'].column_names
if data_args.image_column_name is not None:
UpperCamelCase_ = data_args.image_column_name
elif "image" in column_names:
UpperCamelCase_ = 'image'
elif "img" in column_names:
UpperCamelCase_ = 'img'
else:
UpperCamelCase_ = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase_ = image_processor.size['shortest_edge']
else:
UpperCamelCase_ = (image_processor.size['height'], image_processor.size['width'])
UpperCamelCase_ = Compose(
[
Lambda(lambda __lowercase: img.convert('RGB') if img.mode != "RGB" else img),
RandomResizedCrop(__lowercase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std),
])
def preprocess_images(__lowercase):
UpperCamelCase_ = [transforms(__lowercase) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset')
if data_args.max_train_samples is not None:
UpperCamelCase_ = ds['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
# Set the training transforms
ds["train"].set_transform(__lowercase)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset')
if data_args.max_eval_samples is not None:
UpperCamelCase_ = (
ds['validation'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
)
# Set the validation transforms
ds["validation"].set_transform(__lowercase)
# Compute absolute learning rate
UpperCamelCase_ = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
UpperCamelCase_ = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
UpperCamelCase_ = Trainer(
model=__lowercase , args=__lowercase , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , )
# Training
if training_args.do_train:
UpperCamelCase_ = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase_ = last_checkpoint
UpperCamelCase_ = trainer.train(resume_from_checkpoint=__lowercase)
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics)
trainer.save_metrics('train' , train_result.metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase_ = trainer.evaluate()
trainer.log_metrics('eval' , __lowercase)
trainer.save_metrics('eval' , __lowercase)
# Write model card and (optionally) push to hub
UpperCamelCase_ = {
'tasks': 'masked-auto-encoding',
'dataset': data_args.dataset_name,
'tags': ['masked-auto-encoding'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowercase)
else:
trainer.create_model_card(**__lowercase)
def _snake_case (__lowercase):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 708 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 618 | 0 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class _a ( UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = CpmAntTokenizer
A_ = False
def _UpperCAmelCase ( self ) -> int:
super().setUp()
UpperCamelCase_ = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
UpperCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
UpperCamelCase_ = '今天天气真好!'
UpperCamelCase_ = ['今天', '天气', '真', '好', '!']
UpperCamelCase_ = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = '今天天气真好!'
UpperCamelCase_ = [tokenizer.bos_token] + tokens
UpperCamelCase_ = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
UpperCamelCase_ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
| 709 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case__ : str = logging.getLogger(__name__)
@dataclass(frozen=UpperCAmelCase__ )
class _a :
"""simple docstring"""
A_ = 42
A_ = 42
A_ = None
A_ = None
A_ = None
@dataclass(frozen=UpperCAmelCase__ )
class _a :
"""simple docstring"""
A_ = 42
A_ = None
A_ = None
A_ = None
A_ = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = 42
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase=False , _UpperCAmelCase = False , ) -> Any:
UpperCamelCase_ = hans_processors[task]()
UpperCamelCase_ = os.path.join(
_UpperCAmelCase , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(_UpperCAmelCase ) , _UpperCAmelCase , ) , )
UpperCamelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase_ , UpperCamelCase_ = label_list[2], label_list[1]
UpperCamelCase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCamelCase_ = cached_features_file + '.lock'
with FileLock(_UpperCAmelCase ):
if os.path.exists(_UpperCAmelCase ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCamelCase_ = torch.load(_UpperCAmelCase )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCamelCase_ = (
processor.get_dev_examples(_UpperCAmelCase ) if evaluate else processor.get_train_examples(_UpperCAmelCase )
)
logger.info('Training examples: %s' , len(_UpperCAmelCase ) )
UpperCamelCase_ = hans_convert_examples_to_features(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
logger.info('Saving features into cached file %s' , _UpperCAmelCase )
torch.save(self.features , _UpperCAmelCase )
def __len__( self ) -> Dict:
return len(self.features )
def __getitem__( self , _UpperCAmelCase ) -> InputFeatures:
return self.features[i]
def _UpperCAmelCase ( self ) -> Dict:
return self.label_list
if is_tf_available():
import tensorflow as tf
class _a :
"""simple docstring"""
A_ = 42
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 128 , _UpperCAmelCase=False , _UpperCAmelCase = False , ) -> int:
UpperCamelCase_ = hans_processors[task]()
UpperCamelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase_ , UpperCamelCase_ = label_list[2], label_list[1]
UpperCamelCase_ = label_list
UpperCamelCase_ = processor.get_dev_examples(_UpperCAmelCase ) if evaluate else processor.get_train_examples(_UpperCAmelCase )
UpperCamelCase_ = hans_convert_examples_to_features(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(_UpperCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCamelCase_ = tf.data.Dataset.from_generator(
_UpperCAmelCase , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _UpperCAmelCase ( self ) -> List[str]:
return self.dataset
def __len__( self ) -> str:
return len(self.features )
def __getitem__( self , _UpperCAmelCase ) -> InputFeatures:
return self.features[i]
def _UpperCAmelCase ( self ) -> int:
return self.label_list
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Tuple:
return self._create_examples(self._read_tsv(os.path.join(_UpperCAmelCase , 'heuristics_train_set.txt' ) ) , 'train' )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Dict:
return self._create_examples(self._read_tsv(os.path.join(_UpperCAmelCase , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def _UpperCAmelCase ( self ) -> List[Any]:
return ["contradiction", "entailment", "neutral"]
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = []
for i, line in enumerate(_UpperCAmelCase ):
if i == 0:
continue
UpperCamelCase_ = '%s-%s' % (set_type, line[0])
UpperCamelCase_ = line[5]
UpperCamelCase_ = line[6]
UpperCamelCase_ = line[7][2:] if line[7].startswith('ex' ) else line[7]
UpperCamelCase_ = line[0]
examples.append(InputExample(guid=_UpperCAmelCase , text_a=_UpperCAmelCase , text_b=_UpperCAmelCase , label=_UpperCAmelCase , pairID=_UpperCAmelCase ) )
return examples
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , ):
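# Tokenize each premise/hypothesis pair to max_length and attach the mapped label and the integer pairID.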
UpperCamelCase_ = {label: i for i, label in enumerate(__lowercase)}
UpperCamelCase_ = []
for ex_index, example in tqdm.tqdm(enumerate(__lowercase) , desc='convert examples to features'):
if ex_index % 10000 == 0:
logger.info('Writing example %d' % (ex_index))
UpperCamelCase_ = tokenizer(
example.text_a , example.text_b , add_special_tokens=__lowercase , max_length=__lowercase , padding='max_length' , truncation=__lowercase , return_overflowing_tokens=__lowercase , )
UpperCamelCase_ = label_map[example.label] if example.label in label_map else 0
UpperCamelCase_ = int(example.pairID)
features.append(InputFeatures(**__lowercase , label=__lowercase , pairID=__lowercase))
for i, example in enumerate(examples[:5]):
logger.info('*** Example ***')
logger.info(f"""guid: {example}""")
logger.info(f"""features: {features[i]}""")
return features
snake_case__ : List[str] = {
"""hans""": 3,
}
snake_case__ : Union[str, Any] = {
"""hans""": HansProcessor,
}
| 618 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
snake_case__ : str = random.Random()
if is_torch_available():
import torch
def _snake_case (__lowercase , __lowercase=1.0 , __lowercase=None , __lowercase=None ):
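# Build a shape[0] x shape[1] nested list of random floats in [0, scale), using the module-level RNG unless one is supplied.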
if rng is None:
UpperCamelCase_ = global_rng
UpperCamelCase_ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=400 , _UpperCAmelCase=2000 , _UpperCAmelCase=1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=16000 , _UpperCAmelCase=True , _UpperCAmelCase=True , ) -> Optional[int]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = min_seq_length
UpperCamelCase_ = max_seq_length
UpperCamelCase_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase_ = feature_size
UpperCamelCase_ = padding_value
UpperCamelCase_ = sampling_rate
UpperCamelCase_ = return_attention_mask
UpperCamelCase_ = do_normalize
def _UpperCAmelCase ( self ) -> Optional[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _UpperCAmelCase ( self , _UpperCAmelCase=False , _UpperCAmelCase=False ) -> int:
def _flatten(_UpperCAmelCase ):
return list(itertools.chain(*_UpperCAmelCase ) )
if equal_length:
UpperCamelCase_ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCamelCase_ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCamelCase_ = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _a ( UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = ASTFeatureExtractor
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = ASTFeatureExtractionTester(self )
def _UpperCAmelCase ( self ) -> str:
# Tests that all calls wrap to encode_plus and batch_encode_plus
UpperCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCamelCase_ = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
UpperCamelCase_ = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
UpperCamelCase_ = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )
# Test batched
UpperCamelCase_ = feat_extract(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='np' ).input_values
UpperCamelCase_ = feat_extract(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase_ = np.asarray(_UpperCAmelCase )
UpperCamelCase_ = feat_extract(_UpperCAmelCase , return_tensors='np' ).input_values
UpperCamelCase_ = feat_extract(_UpperCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )
@require_torch
def _UpperCAmelCase ( self ) -> List[str]:
import torch
UpperCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_ = np.random.rand(100 ).astype(np.floataa )
UpperCamelCase_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCamelCase_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Dict:
from datasets import load_dataset
UpperCamelCase_ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
UpperCamelCase_ = ds.sort('id' ).select(range(_UpperCAmelCase ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
@require_torch
def _UpperCAmelCase ( self ) -> List[str]:
# fmt: off
UpperCamelCase_ = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
UpperCamelCase_ = self._load_datasamples(1 )
UpperCamelCase_ = ASTFeatureExtractor()
UpperCamelCase_ = feature_extractor(_UpperCAmelCase , return_tensors='pt' ).input_values
self.assertEqual(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _UpperCAmelCase , atol=1e-4 ) )
| 710 |
from typing import Any
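# Viterbi algorithm: finds the most likely sequence of hidden states for a given
# sequence of observations. For each observation o and state s it computes
# probabilities[(s, o)] = max_k probabilities[(k, o_prev)] * transition[k][s] * emission[s][o],
# keeping back-pointers so the optimal state path can be reconstructed at the end.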
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ):
_validation(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , )
# Creates data structures and fill initial step
UpperCamelCase_ = {}
UpperCamelCase_ = {}
for state in states_space:
UpperCamelCase_ = observations_space[0]
UpperCamelCase_ = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
UpperCamelCase_ = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__lowercase)):
UpperCamelCase_ = observations_space[o]
UpperCamelCase_ = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
UpperCamelCase_ = ''
UpperCamelCase_ = -1
for k_state in states_space:
UpperCamelCase_ = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
UpperCamelCase_ = probability
UpperCamelCase_ = k_state
# Update probabilities and pointers dicts
UpperCamelCase_ = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
UpperCamelCase_ = arg_max
# The final observation
UpperCamelCase_ = observations_space[len(__lowercase) - 1]
# argmax for given final observation
UpperCamelCase_ = ''
UpperCamelCase_ = -1
for k_state in states_space:
UpperCamelCase_ = probabilities[(k_state, final_observation)]
if probability > max_probability:
UpperCamelCase_ = probability
UpperCamelCase_ = k_state
UpperCamelCase_ = arg_max
# Process pointers backwards
UpperCamelCase_ = last_state
UpperCamelCase_ = []
for o in range(len(__lowercase) - 1 , -1 , -1):
result.append(__lowercase)
UpperCamelCase_ = pointers[previous, observations_space[o]]
result.reverse()
return result
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ):
_validate_not_empty(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , )
_validate_lists(__lowercase , __lowercase)
_validate_dicts(
__lowercase , __lowercase , __lowercase)
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
]):
raise ValueError('There\'s an empty parameter')
def _snake_case (__lowercase , __lowercase):
_validate_list(__lowercase , 'observations_space')
_validate_list(__lowercase , 'states_space')
def _snake_case (__lowercase , __lowercase):
if not isinstance(_object , __lowercase):
UpperCamelCase_ = f"""{var_name} must be a list"""
raise ValueError(__lowercase)
else:
for x in _object:
if not isinstance(__lowercase , __lowercase):
UpperCamelCase_ = f"""{var_name} must be a list of strings"""
raise ValueError(__lowercase)
def _snake_case (__lowercase , __lowercase , __lowercase , ):
_validate_dict(__lowercase , 'initial_probabilities' , __lowercase)
_validate_nested_dict(__lowercase , 'transition_probabilities')
_validate_nested_dict(__lowercase , 'emission_probabilities')
def _snake_case (__lowercase , __lowercase):
_validate_dict(_object , __lowercase , __lowercase)
for x in _object.values():
_validate_dict(__lowercase , __lowercase , __lowercase , __lowercase)
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase = False):
if not isinstance(_object , __lowercase):
UpperCamelCase_ = f"""{var_name} must be a dict"""
raise ValueError(__lowercase)
if not all(isinstance(__lowercase , __lowercase) for x in _object):
UpperCamelCase_ = f"""{var_name} all keys must be strings"""
raise ValueError(__lowercase)
if not all(isinstance(__lowercase , __lowercase) for x in _object.values()):
UpperCamelCase_ = 'nested dictionary ' if nested else ''
UpperCamelCase_ = f"""{var_name} {nested_text}all values must be {value_type.__name__}"""
raise ValueError(__lowercase)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 618 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
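# Tests for RealmRetriever: a tiny vocab and dummy block records are written to a
# temporary directory, then block retrieval, answer span tagging and save/load
# round-trips (local path and mocked Hub download) are checked.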
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = tempfile.mkdtemp()
UpperCamelCase_ = 5
# Realm tok
UpperCamelCase_ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
UpperCamelCase_ = os.path.join(self.tmpdirname , 'realm_tokenizer' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
UpperCamelCase_ = os.path.join(_UpperCAmelCase , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
UpperCamelCase_ = os.path.join(self.tmpdirname , 'realm_block_records' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) )
def _UpperCAmelCase ( self ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = RealmConfig(num_block_records=self.num_block_records )
return config
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
} )
return dataset
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = np.array(
[
B'This is the first record',
B'This is the second record',
B'This is the third record',
B'This is the fourth record',
B'This is the fifth record',
B'This is a longer longer longer record',
] , dtype=_UpperCAmelCase , )
return block_records
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = self.get_config()
UpperCamelCase_ = self.get_dummy_retriever()
UpperCamelCase_ = retriever.tokenizer
UpperCamelCase_ = np.array([0, 3] , dtype='long' )
UpperCamelCase_ = tokenizer(['Test question'] ).input_ids
UpperCamelCase_ = tokenizer(
['the fourth'] , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , ).input_ids
UpperCamelCase_ = config.reader_seq_len
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = retriever(
_UpperCAmelCase , _UpperCAmelCase , answer_ids=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors='np' )
self.assertEqual(len(_UpperCAmelCase ) , 2 )
self.assertEqual(len(_UpperCAmelCase ) , 2 )
self.assertEqual(len(_UpperCAmelCase ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = self.get_config()
UpperCamelCase_ = self.get_dummy_retriever()
UpperCamelCase_ = retriever.tokenizer
UpperCamelCase_ = np.array([0, 3, 5] , dtype='long' )
UpperCamelCase_ = tokenizer(['Test question'] ).input_ids
UpperCamelCase_ = tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , ).input_ids
UpperCamelCase_ = config.reader_seq_len
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = retriever(
_UpperCAmelCase , _UpperCAmelCase , answer_ids=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors='np' )
self.assertEqual([False, True, True] , _UpperCAmelCase )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _UpperCAmelCase )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
# Test local path
UpperCamelCase_ = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
self.assertEqual(retriever.block_records[0] , B'This is the first record' )
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download:
UpperCamelCase_ = os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME )
UpperCamelCase_ = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' )
self.assertEqual(retriever.block_records[0] , B'This is the first record' )
| 711 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
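# CLI smoke tests: `accelerate launch` is actually executed against the test configs,
# while the `accelerate tpu-config` tests only assert on the composed gcloud command
# printed by --debug; nothing is executed remotely.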
class _a ( unittest.TestCase ):
"""simple docstring"""
A_ = inspect.getfile(accelerate.test_utils )
A_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_cli.py"""] )
A_ = ["""accelerate""", """launch"""]
A_ = Path.home() / """.cache/huggingface/accelerate"""
A_ = """default_config.yaml"""
A_ = config_folder / config_file
A_ = config_folder / """_default_config.yaml"""
A_ = Path("""tests/test_configs""" )
@classmethod
def _UpperCAmelCase ( cls ) -> Union[str, Any]:
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def _UpperCAmelCase ( cls ) -> List[str]:
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def _UpperCAmelCase ( self ) -> Optional[int]:
for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
with self.subTest(config_file=_UpperCAmelCase ):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(_UpperCAmelCase ), self.test_file_path] , env=os.environ.copy() )
def _UpperCAmelCase ( self ) -> Tuple:
execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() )
class _a ( unittest.TestCase ):
"""simple docstring"""
A_ = """test-tpu"""
A_ = """us-central1-a"""
A_ = """ls"""
A_ = ["""accelerate""", """tpu-config"""]
A_ = """cd /usr/share"""
A_ = """tests/test_samples/test_command_file.sh"""
A_ = """Running gcloud compute tpus tpu-vm ssh"""
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_UpperCAmelCase )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
| 618 | 0 |
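# Smallest positive number evenly divisible by every integer from 1 to n
# (Project Euler problem 5 for the default n = 20), via lcm(x, y) = x * y // gcd(x, y).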
def _snake_case (__lowercase , __lowercase):
return x if y == 0 else greatest_common_divisor(__lowercase , x % y)
def _snake_case (__lowercase , __lowercase):
return (x * y) // greatest_common_divisor(__lowercase , __lowercase)
def _snake_case (__lowercase = 20):
UpperCamelCase_ = 1
for i in range(1 , n + 1):
UpperCamelCase_ = lcm(__lowercase , __lowercase)
return g
if __name__ == "__main__":
print(f'{solution() = }')
| 712 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
snake_case__ : Any = logging.get_logger(__name__)
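# Deprecated GLUE dataset wrapper: examples are converted to features once and cached
# to disk; a FileLock ensures only one process in distributed training does the
# preprocessing while the others read from the cache.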
@dataclass
class _a :
"""simple docstring"""
A_ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
A_ = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
A_ = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
A_ = field(
default=UpperCAmelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = self.task_name.lower()
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = """train"""
A_ = """dev"""
A_ = """test"""
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = 42
A_ = 42
A_ = 42
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = Split.train , _UpperCAmelCase = None , ) -> Tuple:
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , _UpperCAmelCase , )
UpperCamelCase_ = args
UpperCamelCase_ = glue_processors[args.task_name]()
UpperCamelCase_ = glue_output_modes[args.task_name]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
try:
UpperCamelCase_ = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
UpperCamelCase_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
UpperCamelCase_ = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase_ , UpperCamelCase_ = label_list[2], label_list[1]
UpperCamelCase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCamelCase_ = cached_features_file + '.lock'
with FileLock(_UpperCAmelCase ):
if os.path.exists(_UpperCAmelCase ) and not args.overwrite_cache:
UpperCamelCase_ = time.time()
UpperCamelCase_ = torch.load(_UpperCAmelCase )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
UpperCamelCase_ = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
UpperCamelCase_ = self.processor.get_test_examples(args.data_dir )
else:
UpperCamelCase_ = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
UpperCamelCase_ = examples[:limit_length]
UpperCamelCase_ = glue_convert_examples_to_features(
_UpperCAmelCase , _UpperCAmelCase , max_length=args.max_seq_length , label_list=_UpperCAmelCase , output_mode=self.output_mode , )
UpperCamelCase_ = time.time()
torch.save(self.features , _UpperCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self ) -> List[str]:
return len(self.features )
def __getitem__( self , _UpperCAmelCase ) -> InputFeatures:
return self.features[i]
def _UpperCAmelCase ( self ) -> Tuple:
return self.label_list
| 618 | 0 |
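# Baconian cipher: every letter maps to a 5-character code over the alphabet {A, B}.
# Note this table is a 26-letter variant ('j' and 'v' get their own codes instead of
# sharing with 'i' and 'u' as in the classical cipher).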
snake_case__ : Optional[int] = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
snake_case__ : int = {value: key for key, value in encode_dict.items()}
def _snake_case (__lowercase):
UpperCamelCase_ = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces')
return encoded
def _snake_case (__lowercase):
if set(__lowercase) - {"A", "B", " "} != set():
raise Exception('decode() accepts only \'A\', \'B\' and spaces')
UpperCamelCase_ = ''
for word in coded.split():
while len(__lowercase) != 0:
decoded += decode_dict[word[:5]]
UpperCamelCase_ = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 713 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
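# Newton-Raphson root finding with a multiplicity correction for repeated roots:
# x_{n+1} = x_n - multiplicity * f(x_n) / f'(x_n), iterating until two consecutive
# guesses differ by less than `precision`.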
def _snake_case (__lowercase , __lowercase , __lowercase = "x" , __lowercase = 10**-10 , __lowercase = 1 , ):
UpperCamelCase_ = symbols(__lowercase)
UpperCamelCase_ = lambdify(__lowercase , __lowercase)
UpperCamelCase_ = lambdify(__lowercase , diff(__lowercase , __lowercase))
UpperCamelCase_ = starting_point
while True:
if diff_function(__lowercase) != 0:
UpperCamelCase_ = prev_guess - multiplicity * func(__lowercase) / diff_function(
__lowercase)
else:
raise ZeroDivisionError('Could not find root') from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess) < precision:
return next_guess
UpperCamelCase_ = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find the fourth root of 5
print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 - 5", 0.4 + 5j)}')
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
f'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}',
)
# Find root of cos(x)
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 618 | 0 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
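# Quantum Fourier transform on n qubits: a Hadamard on each qubit followed by
# controlled phase rotations of pi / 2^k between qubit pairs, then swaps to reverse
# the qubit order, simulated for 10000 shots on the qasm_simulator backend.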
def _snake_case (__lowercase = 3):
if isinstance(__lowercase , __lowercase):
raise TypeError('number of qubits must be an integer.')
if number_of_qubits <= 0:
raise ValueError('number of qubits must be > 0.')
if math.floor(__lowercase) != number_of_qubits:
raise ValueError('number of qubits must be an exact integer.')
if number_of_qubits > 10:
raise ValueError('number of qubits too large to simulate (>10).')
UpperCamelCase_ = QuantumRegister(__lowercase , 'qr')
UpperCamelCase_ = ClassicalRegister(__lowercase , 'cr')
UpperCamelCase_ = QuantumCircuit(__lowercase , __lowercase)
UpperCamelCase_ = number_of_qubits
for i in range(__lowercase):
quantum_circuit.h(number_of_qubits - i - 1)
counter -= 1
for j in range(__lowercase):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , __lowercase , __lowercase)
for k in range(number_of_qubits // 2):
quantum_circuit.swap(__lowercase , number_of_qubits - k - 1)
# measure all the qubits
quantum_circuit.measure(__lowercase , __lowercase)
# simulate with 10000 shots
UpperCamelCase_ = Aer.get_backend('qasm_simulator')
UpperCamelCase_ = execute(__lowercase , __lowercase , shots=10000)
return job.result().get_counts(__lowercase)
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
| 714 |
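# Project Euler problem 25: returns the index of the first Fibonacci number to
# contain n digits (n = 1000 by default).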
def _snake_case (__lowercase = 1000):
UpperCamelCase_ , UpperCamelCase_ = 1, 1
UpperCamelCase_ = 2
while True:
UpperCamelCase_ = 0
UpperCamelCase_ = fa + fa
UpperCamelCase_ , UpperCamelCase_ = fa, f
index += 1
for _ in str(__lowercase):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 618 | 0 |
'''simple docstring'''
snake_case__ : str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
snake_case__ : List[str] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
snake_case__ : Any = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 715 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : Optional[int] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
snake_case__ : Optional[Any] = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
snake_case__ : List[str] = {
"""google/realm-cc-news-pretrained-embedder""": 5_1_2,
"""google/realm-cc-news-pretrained-encoder""": 5_1_2,
"""google/realm-cc-news-pretrained-scorer""": 5_1_2,
"""google/realm-cc-news-pretrained-openqa""": 5_1_2,
"""google/realm-orqa-nq-openqa""": 5_1_2,
"""google/realm-orqa-nq-reader""": 5_1_2,
"""google/realm-orqa-wq-openqa""": 5_1_2,
"""google/realm-orqa-wq-reader""": 5_1_2,
}
snake_case__ : Tuple = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_INIT_CONFIGURATION
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = RealmTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase="[UNK]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[PAD]" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ) -> Optional[Any]:
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenize_chinese_chars=_UpperCAmelCase , strip_accents=_UpperCAmelCase , **_UpperCAmelCase , )
UpperCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCAmelCase ) != tokenize_chinese_chars
):
UpperCamelCase_ = getattr(_UpperCAmelCase , normalizer_state.pop('type' ) )
UpperCamelCase_ = do_lower_case
UpperCamelCase_ = strip_accents
UpperCamelCase_ = tokenize_chinese_chars
UpperCamelCase_ = normalizer_class(**_UpperCAmelCase )
UpperCamelCase_ = do_lower_case
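# Batch-encodes candidate texts: each example's candidate list is tokenized in its
# own call, every sequence is padded to max_length, and the per-example encodings
# are collected so they can be stacked into (batch, num_candidates, seq_len) tensors.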
def _UpperCAmelCase ( self , _UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]:
UpperCamelCase_ = PaddingStrategy.MAX_LENGTH
UpperCamelCase_ = text
UpperCamelCase_ = kwargs.pop('text_pair' , _UpperCAmelCase )
UpperCamelCase_ = kwargs.pop('return_tensors' , _UpperCAmelCase )
UpperCamelCase_ = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(_UpperCAmelCase ):
if batch_text_pair is not None:
UpperCamelCase_ = batch_text_pair[idx]
else:
UpperCamelCase_ = None
UpperCamelCase_ = super().__call__(_UpperCAmelCase , _UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
UpperCamelCase_ = encoded_candidates.get('input_ids' )
UpperCamelCase_ = encoded_candidates.get('attention_mask' )
UpperCamelCase_ = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(_UpperCAmelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(_UpperCAmelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(_UpperCAmelCase )
UpperCamelCase_ = {key: item for key, item in output_data.items() if len(_UpperCAmelCase ) != 0}
return BatchEncoding(_UpperCAmelCase , tensor_type=_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=None ) -> Union[str, Any]:
UpperCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> List[int]:
UpperCamelCase_ = [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> Tuple[str]:
UpperCamelCase_ = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
| 618 | 0 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
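# Writes a README.md model card for each of the three allenai wmt16 en-de
# checkpoints, filling the template below with the language pair, BLEU scores
# and a usage snippet.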
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase):
UpperCamelCase_ = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
UpperCamelCase_ = {
'wmt16-en-de-dist-12-1': [28.3, 27.52],
'wmt16-en-de-dist-6-1': [27.4, 27.11],
'wmt16-en-de-12-1': [26.9, 25.75],
}
UpperCamelCase_ = f"""{src_lang}-{tgt_lang}"""
UpperCamelCase_ = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
model_card_dir.mkdir(parents=__lowercase , exist_ok=__lowercase)
UpperCamelCase_ = os.path.join(__lowercase , 'README.md')
print(f"""Generating {path}""")
with open(__lowercase , 'w' , encoding='utf-8') as f:
f.write(__lowercase)
# make sure we are under the root of the project
snake_case__ : Optional[Any] = Path(__file__).resolve().parent.parent.parent
snake_case__ : Dict = repo_dir / """model_cards"""
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
snake_case__ : Tuple = model_cards_dir / """allenai""" / model_name
write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
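# Lazy import structure: the heavy torch / sentencepiece submodules are only
# imported on first attribute access through _LazyModule, keeping the top-level
# package import cheap.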
snake_case__ : List[str] = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[str] = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Any = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
snake_case__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 618 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
snake_case__ : Optional[int] = get_tests_dir("""fixtures""")
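# The first test class mocks HTTP requests to check error/offline handling; the
# @is_staging_test class round-trips feature extractors through the Hub and cleans
# up its repos afterwards.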
class _a ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# A mock response for an HTTP head request to emulate server down
UpperCamelCase_ = mock.Mock()
UpperCamelCase_ = 500
UpperCamelCase_ = {}
UpperCamelCase_ = HTTPError
UpperCamelCase_ = {}
# Download this model to make sure it's in the cache.
UpperCamelCase_ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase ) as mock_head:
UpperCamelCase_ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This checks that we did call the fake head request
mock_head.assert_called()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase_ = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class _a ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _UpperCAmelCase ( cls ) -> Optional[Any]:
UpperCamelCase_ = TOKEN
HfFolder.save_token(_UpperCAmelCase )
@classmethod
def _UpperCAmelCase ( cls ) -> Any:
try:
delete_repo(token=cls._token , repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = WavaVecaFeatureExtractor.from_pretrained(_UpperCAmelCase )
feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token )
UpperCamelCase_ = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_UpperCAmelCase , repo_id='test-feature-extractor' , push_to_hub=_UpperCAmelCase , use_auth_token=self._token )
UpperCamelCase_ = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = WavaVecaFeatureExtractor.from_pretrained(_UpperCAmelCase )
feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token )
UpperCamelCase_ = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_UpperCAmelCase , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=_UpperCAmelCase , use_auth_token=self._token )
UpperCamelCase_ = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
CustomFeatureExtractor.register_for_auto_class()
UpperCamelCase_ = CustomFeatureExtractor.from_pretrained(_UpperCAmelCase )
feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , )
UpperCamelCase_ = AutoFeatureExtractor.from_pretrained(
f"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=_UpperCAmelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
| 717 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) )
class _a :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=64 , _UpperCAmelCase=3 , _UpperCAmelCase=3 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=16 , _UpperCAmelCase=[128, 256, 384] , _UpperCAmelCase=[4, 6, 8] , _UpperCAmelCase=[2, 3, 4] , _UpperCAmelCase=[16, 16, 16] , _UpperCAmelCase=0 , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=2 , ) -> int:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = kernel_size
UpperCamelCase_ = stride
UpperCamelCase_ = padding
UpperCamelCase_ = hidden_sizes
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = depths
UpperCamelCase_ = key_dim
UpperCamelCase_ = drop_path_rate
UpperCamelCase_ = patch_size
UpperCamelCase_ = attention_ratio
UpperCamelCase_ = mlp_ratio
UpperCamelCase_ = initializer_range
UpperCamelCase_ = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = num_labels
UpperCamelCase_ = initializer_range
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase_ = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> str:
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
UpperCamelCase_ = LevitModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase )
UpperCamelCase_ = (self.image_size, self.image_size)
UpperCamelCase_ , UpperCamelCase_ = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase_ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
UpperCamelCase_ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = LevitForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
A_ = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
A_ = False
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = LevitModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> Any:
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> str:
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def _UpperCAmelCase ( self ) -> Dict:
pass
@unittest.skip(reason='Levit does not output attentions' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
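# Checks that the model returns len(depths) + 1 hidden states and that the first one
# has (height/16 * width/16) tokens of size hidden_sizes[0], i.e. the spatial size
# left after the four stride-2 convolutions of the patch embedding.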
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCamelCase_ = outputs.hidden_states
UpperCamelCase_ = len(self.model_tester.depths ) + 1
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
UpperCamelCase_ = (self.model_tester.image_size, self.model_tester.image_size)
UpperCamelCase_ , UpperCamelCase_ = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase_ = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
UpperCamelCase_ = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Tuple:
UpperCamelCase_ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
if not self.model_tester.is_training:
return
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_UpperCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase_ = False
UpperCamelCase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_UpperCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type["title"]}""" ):
UpperCamelCase_ = problem_type['title']
UpperCamelCase_ = problem_type['num_labels']
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if problem_type["num_labels"] > 1:
UpperCamelCase_ = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
UpperCamelCase_ = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_UpperCAmelCase ) as warning_list:
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def _UpperCAmelCase ( self ) -> List[str]:
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = LevitModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _snake_case ():
UpperCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCAmelCase ( self ) -> str:
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_UpperCAmelCase )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCamelCase_ = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
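# A minimal sketch of the integration-test pattern used just above: run a
# forward pass under torch.no_grad() and compare a slice of the logits against
# reference values within a tolerance. The model, inputs, expected shape, and
# expected slice passed in are illustrative assumptions, not fixtures of this file.
import torch

def verify_logits(model, inputs, expected_shape, expected_slice, atol=1e-4):
    with torch.no_grad():
        logits = model(**inputs).logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=atol)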
| 618 | 0 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
snake_case__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(
UpperCAmelCase__ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> np.ndarray:
if self.framework == "tf":
UpperCamelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
UpperCamelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_UpperCAmelCase )
else:
raise ValueError('Unsupported framework' )
return masked_index
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> np.ndarray:
UpperCamelCase_ = self.get_masked_index(_UpperCAmelCase )
UpperCamelCase_ = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[str]:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['input_ids'][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ) -> Dict[str, GenericTensor]:
if return_tensors is None:
UpperCamelCase_ = self.framework
UpperCamelCase_ = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.ensure_exactly_one_mask_token(_UpperCAmelCase )
return model_inputs
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = self.model(**_UpperCAmelCase )
UpperCamelCase_ = model_inputs['input_ids']
return model_outputs
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=5 , _UpperCAmelCase=None ) -> str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
UpperCamelCase_ = target_ids.shape[0]
UpperCamelCase_ = model_outputs['input_ids'][0]
UpperCamelCase_ = model_outputs['logits']
if self.framework == "tf":
UpperCamelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
UpperCamelCase_ = outputs.numpy()
UpperCamelCase_ = outputs[0, masked_index, :]
UpperCamelCase_ = stable_softmax(_UpperCAmelCase , axis=-1 )
if target_ids is not None:
UpperCamelCase_ = tf.gather_nd(tf.squeeze(_UpperCAmelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
UpperCamelCase_ = tf.expand_dims(_UpperCAmelCase , 0 )
UpperCamelCase_ = tf.math.top_k(_UpperCAmelCase , k=_UpperCAmelCase )
UpperCamelCase_ , UpperCamelCase_ = topk.values.numpy(), topk.indices.numpy()
else:
UpperCamelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_UpperCAmelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
UpperCamelCase_ = outputs[0, masked_index, :]
UpperCamelCase_ = logits.softmax(dim=-1 )
if target_ids is not None:
UpperCamelCase_ = probs[..., target_ids]
UpperCamelCase_ , UpperCamelCase_ = probs.topk(_UpperCAmelCase )
UpperCamelCase_ = []
UpperCamelCase_ = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
UpperCamelCase_ = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
UpperCamelCase_ = input_ids.numpy().copy()
if target_ids is not None:
UpperCamelCase_ = target_ids[p].tolist()
UpperCamelCase_ = p
# Filter padding out:
UpperCamelCase_ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
UpperCamelCase_ = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCamelCase_ = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence}
row.append(_UpperCAmelCase )
result.append(_UpperCAmelCase )
if single_mask:
return result[0]
return result
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=None ) -> Dict:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCamelCase_ = [targets]
try:
UpperCamelCase_ = self.tokenizer.get_vocab()
except Exception:
UpperCamelCase_ = {}
UpperCamelCase_ = []
for target in targets:
UpperCamelCase_ = vocab.get(_UpperCAmelCase , _UpperCAmelCase )
if id_ is None:
UpperCamelCase_ = self.tokenizer(
_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , max_length=1 , truncation=_UpperCAmelCase , )['input_ids']
if len(_UpperCAmelCase ) == 0:
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
'We cannot replace it with anything meaningful, ignoring it' )
continue
UpperCamelCase_ = input_ids[0]
# XXX: when users hit this code path, tokenizing each missing target makes
# the pipeline pretty slow, so the warning below lets them fix their input
# to get faster performance.
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
UpperCamelCase_ = list(set(_UpperCAmelCase ) )
if len(_UpperCAmelCase ) == 0:
raise ValueError('At least one valid target must be provided when `targets` is passed.' )
UpperCamelCase_ = np.array(_UpperCAmelCase )
return target_ids
def _UpperCAmelCase ( self , _UpperCAmelCase=None , _UpperCAmelCase=None ) -> Optional[int]:
UpperCamelCase_ = {}
if targets is not None:
UpperCamelCase_ = self.get_target_ids(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = target_ids
if top_k is not None:
UpperCamelCase_ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' )
return {}, {}, postprocess_params
def __call__( self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
UpperCamelCase_ = super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) == 1:
return outputs[0]
return outputs
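# A minimal usage sketch for a fill-mask pipeline such as the one above; the
# checkpoint name and the example sentence are illustrative assumptions.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="distilbert-base-uncased")
# `targets` restricts scoring to specific candidate tokens (handled by the
# target-lookup helper above); `top_k` caps the number of predictions returned.
print(fill_mask("Paris is the [MASK] of France.", top_k=2, targets=["capital", "center"]))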
| 718 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Optional[int] = logging.get_logger(__name__)
snake_case__ : List[Any] = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = """git_vision_model"""
def __init__( self , _UpperCAmelCase=768 , _UpperCAmelCase=3072 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3 , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase="quick_gelu" , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0_2 , **_UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**_UpperCAmelCase )
UpperCamelCase_ = hidden_size
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = num_channels
UpperCamelCase_ = patch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = attention_dropout
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = hidden_act
@classmethod
def _UpperCAmelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCamelCase_ , UpperCamelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('model_type' ) == "git":
UpperCamelCase_ = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = """git"""
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=30522 , _UpperCAmelCase=768 , _UpperCAmelCase=6 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1024 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=101 , _UpperCAmelCase=102 , _UpperCAmelCase=None , **_UpperCAmelCase , ) -> Optional[int]:
super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
if vision_config is None:
UpperCamelCase_ = {}
logger.info('vision_config is None. Initializing the GitVisionConfig with default values.' )
UpperCamelCase_ = GitVisionConfig(**_UpperCAmelCase )
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = hidden_act
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = initializer_range
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = position_embedding_type
UpperCamelCase_ = use_cache
UpperCamelCase_ = tie_word_embeddings
UpperCamelCase_ = num_image_with_embedding
UpperCamelCase_ = bos_token_id
UpperCamelCase_ = eos_token_id
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = copy.deepcopy(self.__dict__ )
UpperCamelCase_ = self.vision_config.to_dict()
UpperCamelCase_ = self.__class__.model_type
return output
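# A minimal sketch of composing the two configuration classes above, assuming
# they are the transformers GitVisionConfig / GitConfig pair; the sizes are
# illustrative defaults, not values required by this file.
from transformers import GitConfig, GitVisionConfig

vision_config = GitVisionConfig(hidden_size=768, num_hidden_layers=12)
config = GitConfig(vision_config=vision_config.to_dict(), vocab_size=30522)
assert config.vision_config.hidden_size == 768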
| 618 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[Any]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
UpperCamelCase_ = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = 'sshleifer/tiny-gpt2'
UpperCamelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
UpperCamelCase_ = PyTorchBenchmark(_UpperCAmelCase )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ = 'sgugger/tiny-distilbert-classification'
UpperCamelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , only_pretrain_model=_UpperCAmelCase , )
UpperCamelCase_ = PyTorchBenchmark(_UpperCAmelCase )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = 'sshleifer/tiny-gpt2'
UpperCamelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , torchscript=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
UpperCamelCase_ = PyTorchBenchmark(_UpperCAmelCase )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = 'sshleifer/tiny-gpt2'
UpperCamelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , fpaa=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
UpperCamelCase_ = PyTorchBenchmark(_UpperCAmelCase )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = 'sshleifer/tiny-gpt2'
UpperCamelCase_ = AutoConfig.from_pretrained(_UpperCAmelCase )
# set architectures equal to `None`
UpperCamelCase_ = None
UpperCamelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
UpperCamelCase_ = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ = 'sshleifer/tiny-gpt2'
UpperCamelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
UpperCamelCase_ = PyTorchBenchmark(_UpperCAmelCase )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = 'sshleifer/tiny-gpt2'
UpperCamelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
UpperCamelCase_ = PyTorchBenchmark(_UpperCAmelCase )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = 'sshleifer/tiny-gpt2'
UpperCamelCase_ = AutoConfig.from_pretrained(_UpperCAmelCase )
UpperCamelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
UpperCamelCase_ = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = 'sshleifer/tinier_bart'
UpperCamelCase_ = AutoConfig.from_pretrained(_UpperCAmelCase )
UpperCamelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
UpperCamelCase_ = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = 'sshleifer/tiny-gpt2'
UpperCamelCase_ = AutoConfig.from_pretrained(_UpperCAmelCase )
UpperCamelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
UpperCamelCase_ = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = 'sshleifer/tinier_bart'
UpperCamelCase_ = AutoConfig.from_pretrained(_UpperCAmelCase )
UpperCamelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
UpperCamelCase_ = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , save_to_csv=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_UpperCAmelCase , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(_UpperCAmelCase , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(_UpperCAmelCase , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(_UpperCAmelCase , 'train_time.csv' ) , env_info_csv_file=os.path.join(_UpperCAmelCase , 'env.csv' ) , multi_process=_UpperCAmelCase , )
UpperCamelCase_ = PyTorchBenchmark(_UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_UpperCAmelCase , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , 'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(_UpperCAmelCase ):
self.assertTrue(hasattr(_UpperCAmelCase , 'sequential' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'cumulative' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'current' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_UpperCAmelCase , 'log.txt' ) , log_print=_UpperCAmelCase , trace_memory_line_by_line=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
UpperCamelCase_ = PyTorchBenchmark(_UpperCAmelCase )
UpperCamelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , 'log.txt' ) ).exists() )
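# A condensed sketch of the benchmark pattern the tests above exercise; the
# tiny checkpoint and the sizes mirror the test fixtures and are assumptions
# here, not requirements of the benchmark API.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()
print(results.time_inference_result)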
| 719 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = tempfile.mkdtemp()
# fmt: off
UpperCamelCase_ = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
UpperCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
UpperCamelCase_ = {
'do_resize': True,
'size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
UpperCamelCase_ = os.path.join(self.tmpdirname , _UpperCAmelCase )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> Optional[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> Optional[int]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase_ = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCamelCase_ = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
UpperCamelCase_ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='np' )
UpperCamelCase_ = processor(images=_UpperCAmelCase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = 'lower newer'
UpperCamelCase_ = processor(text=_UpperCAmelCase )
UpperCamelCase_ = tokenizer(_UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = 'lower newer'
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(_UpperCAmelCase ):
processor()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_ = processor.batch_decode(_UpperCAmelCase )
UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = 'lower newer'
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 618 | 0 |
from string import ascii_lowercase, ascii_uppercase
def _snake_case(sentence):
    # Upper-case only the first character of the sentence, leaving the rest intact.
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
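# Illustrative behaviour of the function above: only the first character is
# upper-cased, and a non-letter leading character passes through unchanged.
assert _snake_case("hello world") == "Hello world"
assert _snake_case("123 main st") == "123 main st"
assert _snake_case("") == ""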
if __name__ == "__main__":
from doctest import testmod
testmod()
| 720 |
def valid_coloring(neighbours, colored_vertices, color):
    # A color is valid for a vertex if no already-colored neighbour uses it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours))

def util_color(graph, max_colors, colored_vertices, index):
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False

def _snake_case(graph, max_colors):
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
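# A small worked example for the backtracking colorer above; the triangle
# graph (a 0/1 adjacency matrix) is an illustrative assumption.
triangle = [
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 0],
]
print(_snake_case(triangle, 3))  # a valid assignment such as [0, 1, 2]
print(_snake_case(triangle, 2))  # [] -- a triangle needs at least 3 colors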
| 618 | 0 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class _a :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=100 , _UpperCAmelCase=13 , _UpperCAmelCase=30 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=[0, 1, 2, 3] , ) -> Tuple:
UpperCamelCase_ = parent
UpperCamelCase_ = 100
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = scope
UpperCamelCase_ = out_indices
UpperCamelCase_ = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase_ = (image_size // patch_size) ** 2
UpperCamelCase_ = num_patches + 1
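# e.g. with the defaults image_size=30 and patch_size=2 above:
# (30 // 2) ** 2 = 225 patches, so seq_length = 226 including [CLS]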
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def _UpperCAmelCase ( self ) -> str:
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
UpperCamelCase_ = BeitModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
UpperCamelCase_ = BeitForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = self.type_sequence_label_size
UpperCamelCase_ = BeitForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase_ = 1
UpperCamelCase_ = BeitForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = BeitForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCamelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
A_ = (
{
"""feature-extraction""": BeitModel,
"""image-classification""": BeitForImageClassification,
"""image-segmentation""": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = BeitModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Any:
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
pass
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[str]:
if not self.model_tester.is_training:
return
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(_UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase_ = False
UpperCamelCase_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(_UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = BeitModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
UpperCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(_UpperCAmelCase )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).pixel_values.to(_UpperCAmelCase )
# prepare bool_masked_pos
UpperCamelCase_ = torch.ones((1, 196) , dtype=torch.bool ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , bool_masked_pos=_UpperCAmelCase )
UpperCamelCase_ = outputs.logits
# verify the logits
UpperCamelCase_ = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , _UpperCAmelCase )
UpperCamelCase_ = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -13.9174], [-3.2_4_5_6, 0.4_9_4_8, -13.9401], [-3.2_0_3_3, 0.5_1_2_1, -13.8550]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _UpperCAmelCase , atol=1e-2 ) )
@slow
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(_UpperCAmelCase )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
UpperCamelCase_ = outputs.logits
# verify the logits
UpperCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(logits.shape , _UpperCAmelCase )
UpperCamelCase_ = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
UpperCamelCase_ = 281
self.assertEqual(logits.argmax(-1 ).item() , _UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
_UpperCAmelCase )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
UpperCamelCase_ = outputs.logits
# verify the logits
UpperCamelCase_ = torch.Size((1, 21841) )
self.assertEqual(logits.shape , _UpperCAmelCase )
UpperCamelCase_ = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
UpperCamelCase_ = 2396
self.assertEqual(logits.argmax(-1 ).item() , _UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
UpperCamelCase_ = model.to(_UpperCAmelCase )
UpperCamelCase_ = BeitImageProcessor(do_resize=_UpperCAmelCase , size=640 , do_center_crop=_UpperCAmelCase )
UpperCamelCase_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
UpperCamelCase_ = Image.open(ds[0]['file'] )
UpperCamelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
UpperCamelCase_ = outputs.logits
# verify the logits
UpperCamelCase_ = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , _UpperCAmelCase )
UpperCamelCase_ = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
UpperCamelCase_ = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=_UpperCAmelCase , )
else:
UpperCamelCase_ = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
UpperCamelCase_ = model.to(_UpperCAmelCase )
UpperCamelCase_ = BeitImageProcessor(do_resize=_UpperCAmelCase , size=640 , do_center_crop=_UpperCAmelCase )
UpperCamelCase_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
UpperCamelCase_ = Image.open(ds[0]['file'] )
UpperCamelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
UpperCamelCase_ = outputs.logits.detach().cpu()
UpperCamelCase_ = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(500, 300)] )
UpperCamelCase_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
UpperCamelCase_ = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
UpperCamelCase_ = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
| 721 |
import os
from math import log10

def solution(data_file="base_exp.txt"):
    # Find the 1-indexed line whose "base,exponent" pair yields the largest
    # value of base**exponent, compared via exponent * log10(base).
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(',')))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
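# Worked check of the comparison above: for lines "2,11" and "3,7" we compare
# 11 * log10(2) ~= 3.311 against 7 * log10(3) ~= 3.340, so 3**7 (= 2187) beats
# 2**11 (= 2048) -- taking logs ranks huge base**exponent values without ever
# computing them.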
if __name__ == "__main__":
print(solution())
| 618 | 0 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
snake_case__ : int = logging.get_logger(__name__)
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = """AutoTokenizer"""
A_ = ["""tokenizer"""]
A_ = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=None ) -> int:
super().__init__(_UpperCAmelCase )
UpperCamelCase_ = speaker_embeddings
@classmethod
def _UpperCAmelCase ( cls , _UpperCAmelCase , _UpperCAmelCase="speaker_embeddings_path.json" , **_UpperCAmelCase ) -> Union[str, Any]:
if speaker_embeddings_dict_path is not None:
UpperCamelCase_ = get_file_from_repo(
_UpperCAmelCase , _UpperCAmelCase , subfolder=kwargs.pop('subfolder' , _UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , _UpperCAmelCase ) , force_download=kwargs.pop('force_download' , _UpperCAmelCase ) , proxies=kwargs.pop('proxies' , _UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , _UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , _UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , _UpperCAmelCase ) , revision=kwargs.pop('revision' , _UpperCAmelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
f"""`{os.path.join(_UpperCAmelCase , _UpperCAmelCase )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
UpperCamelCase_ = None
else:
with open(_UpperCAmelCase ) as speaker_embeddings_json:
UpperCamelCase_ = json.load(_UpperCAmelCase )
else:
UpperCamelCase_ = None
UpperCamelCase_ = AutoTokenizer.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
return cls(tokenizer=_UpperCAmelCase , speaker_embeddings=_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase="speaker_embeddings_path.json" , _UpperCAmelCase="speaker_embeddings" , _UpperCAmelCase = False , **_UpperCAmelCase , ) -> List[str]:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_UpperCAmelCase , _UpperCAmelCase , 'v2' ) , exist_ok=_UpperCAmelCase )
UpperCamelCase_ = {}
UpperCamelCase_ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
UpperCamelCase_ = self._load_voice_preset(_UpperCAmelCase )
UpperCamelCase_ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , _UpperCAmelCase , f"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=_UpperCAmelCase , )
UpperCamelCase_ = os.path.join(_UpperCAmelCase , f"""{prompt_key}_{key}.npy""" )
UpperCamelCase_ = tmp_dict
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , 'w' ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
super().save_pretrained(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase = None , **_UpperCAmelCase ) -> int:
UpperCamelCase_ = self.speaker_embeddings[voice_preset]
UpperCamelCase_ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
UpperCamelCase_ = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , _UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , _UpperCAmelCase ) , force_download=kwargs.pop('force_download' , _UpperCAmelCase ) , proxies=kwargs.pop('proxies' , _UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , _UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , _UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , _UpperCAmelCase ) , revision=kwargs.pop('revision' , _UpperCAmelCase ) , )
if path is None:
raise ValueError(
f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.""" )
UpperCamelCase_ = np.load(_UpperCAmelCase )
return voice_preset_dict
def _UpperCAmelCase ( self , _UpperCAmelCase = None ) -> Optional[Any]:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="pt" , _UpperCAmelCase=256 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , **_UpperCAmelCase , ) -> Tuple:
if voice_preset is not None and not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
if (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
UpperCamelCase_ = self._load_voice_preset(_UpperCAmelCase )
else:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not voice_preset.endswith('.npz' ):
UpperCamelCase_ = voice_preset + '.npz'
UpperCamelCase_ = np.load(_UpperCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(_UpperCAmelCase , **_UpperCAmelCase )
UpperCamelCase_ = BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
UpperCamelCase_ = self.tokenizer(
_UpperCAmelCase , return_tensors=_UpperCAmelCase , padding='max_length' , max_length=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
if voice_preset is not None:
UpperCamelCase_ = voice_preset
return encoded_text
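# A minimal sketch of a voice-preset dict that satisfies the validation above
# (semantic_prompt 1-D, coarse_prompt and fine_prompt 2-D, matching the
# preset_shape table); the array sizes and dtype are illustrative assumptions.
import numpy as np

voice_preset = {
    "semantic_prompt": np.zeros(256, dtype=np.int64),
    "coarse_prompt": np.zeros((2, 128), dtype=np.int64),
    "fine_prompt": np.zeros((8, 128), dtype=np.int64),
}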
| 700 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _a :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=32 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=4 , _UpperCAmelCase=[0, 1, 2, 3] , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=3 , _UpperCAmelCase=[1, 384, 24, 24] , _UpperCAmelCase=True , _UpperCAmelCase=None , ) -> Optional[Any]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = backbone_out_indices
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_labels
UpperCamelCase_ = backbone_featmap_shape
UpperCamelCase_ = scope
UpperCamelCase_ = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase_ = (image_size // patch_size) ** 2
UpperCamelCase_ = num_patches + 1
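# e.g. with the defaults image_size=32 and patch_size=16 above:
# (32 // 16) ** 2 = 4 patches, so seq_length = 5 including [CLS]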
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase_ = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [96, 192, 384, 768],
'num_groups': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_UpperCAmelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
UpperCamelCase_ = DPTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = DPTForDepthEstimation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = DPTForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
A_ = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ = DPTModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='DPT does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> str:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = True
if model_class in get_values(_UpperCAmelCase ):
continue
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = False
UpperCamelCase_ = True
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(config=_UpperCAmelCase )
# Skip the check for the backbone
UpperCamelCase_ = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase_ = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> Any:
pass
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase_ = DPTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = 'add'
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = DPTForDepthEstimation(_UpperCAmelCase )
def _snake_case ():
UpperCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' )
UpperCamelCase_ = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(_UpperCAmelCase )
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
UpperCamelCase_ = outputs.predicted_depth
# verify the predicted depth
UpperCamelCase_ = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , _UpperCAmelCase )
UpperCamelCase_ = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , _UpperCAmelCase , atol=1e-4 ) )
| 618 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ : Optional[int] = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Optional[int] = ["""LongformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : str = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Optional[int] = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
snake_case__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 701 |
def _snake_case (__lowercase , __lowercase):
_enforce_args(__lowercase , __lowercase)
if n == 0:
return 0
UpperCamelCase_ = float('-inf')
for i in range(1 , n + 1):
UpperCamelCase_ = max(
__lowercase , prices[i - 1] + naive_cut_rod_recursive(n - i , __lowercase))
return max_revenue
def _snake_case (__lowercase , __lowercase):
_enforce_args(__lowercase , __lowercase)
UpperCamelCase_ = [float('-inf') for _ in range(n + 1)]
return _top_down_cut_rod_recursive(__lowercase , __lowercase , __lowercase)
def _snake_case (__lowercase , __lowercase , __lowercase):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
UpperCamelCase_ = float('-inf')
for i in range(1 , n + 1):
UpperCamelCase_ = max(
__lowercase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , __lowercase , __lowercase) , )
UpperCamelCase_ = max_revenue
return max_rev[n]
def _snake_case (__lowercase , __lowercase):
_enforce_args(__lowercase , __lowercase)
# length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
# length 0.
UpperCamelCase_ = [float('-inf') for _ in range(n + 1)]
UpperCamelCase_ = 0
for i in range(1 , n + 1):
UpperCamelCase_ = max_rev[i]
for j in range(1 , i + 1):
UpperCamelCase_ = max(__lowercase , prices[j - 1] + max_rev[i - j])
UpperCamelCase_ = max_revenue_i
return max_rev[n]
def _snake_case (__lowercase , __lowercase):
if n < 0:
UpperCamelCase_ = f"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(__lowercase)
if n > len(__lowercase):
UpperCamelCase_ = (
'Each integral piece of rod must have a corresponding price. '
f"""Got n = {n} but length of prices = {len(__lowercase)}"""
)
raise ValueError(__lowercase)
def _snake_case ():
UpperCamelCase_ = [6, 10, 12, 15, 20, 23]
UpperCamelCase_ = len(__lowercase)
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
UpperCamelCase_ = 36
UpperCamelCase_ = top_down_cut_rod(__lowercase , __lowercase)
UpperCamelCase_ = bottom_up_cut_rod(__lowercase , __lowercase)
UpperCamelCase_ = naive_cut_rod_recursive(__lowercase , __lowercase)
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 618 | 0 |
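The three cut-rod routines above share one recurrence: max_rev[i] = max over j of prices[j-1] + max_rev[i-j]. Below is a minimal de-obfuscated sketch of the bottom-up variant; the names (bottom_up_cut_rod, prices) are illustrative, not taken from the dump.

def bottom_up_cut_rod(n: int, prices: list) -> int:
    # max_rev[i] holds the best revenue obtainable from a rod of length i.
    max_rev = [float("-inf")] * (n + 1)
    max_rev[0] = 0
    for i in range(1, n + 1):
        # Try every first-cut length j and keep the best total revenue.
        max_rev[i] = max(prices[j - 1] + max_rev[i - j] for j in range(1, i + 1))
    return max_rev[n]

assert bottom_up_cut_rod(6, [6, 10, 12, 15, 20, 23]) == 36  # six pieces of length 1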
def _snake_case (__lowercase):
UpperCamelCase_ = []
UpperCamelCase_ = set({'(', '[', '{'})
UpperCamelCase_ = set({')', ']', '}'})
UpperCamelCase_ = {'{': '}', '[': ']', '(': ')'}
for i in range(len(__lowercase)):
if s[i] in open_brackets:
stack.append(s[i])
elif s[i] in closed_brackets and (
len(__lowercase) == 0 or (len(__lowercase) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(__lowercase) == 0
def _snake_case ():
UpperCamelCase_ = input('Enter sequence of brackets: ')
if is_balanced(__lowercase):
print(__lowercase , 'is balanced')
else:
print(__lowercase , 'is not balanced')
if __name__ == "__main__":
main()
| 702 |
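A compact sketch of the stack-based bracket matcher above, with illustrative names; a closing bracket is valid only when it matches the most recently pushed opener.

def is_balanced(s: str) -> bool:
    pairs = {")": "(", "]": "[", "}": "{"}  # closer -> expected opener
    stack = []
    for ch in s:
        if ch in "([{":
            stack.append(ch)
        elif ch in pairs:
            if not stack or stack.pop() != pairs[ch]:
                return False
    return not stack  # balanced only if every opener was closed

assert is_balanced("{[()]}") and not is_balanced("([)]")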
snake_case__ : List[Any] = """Tobias Carryer"""
from time import time
class _a :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=int(time() ) ) -> Tuple: # noqa: B008
UpperCamelCase_ = multiplier
UpperCamelCase_ = increment
UpperCamelCase_ = modulo
UpperCamelCase_ = seed
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
snake_case__ : List[Any] = LinearCongruentialGenerator(1_6_6_4_5_2_5, 1_0_1_3_9_0_4_2_2_3, 2 << 3_1)
while True:
print(lcg.next_number())
| 618 | 0 |
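The generator class above implements the recurrence seed_{k+1} = (multiplier * seed_k + increment) mod modulo. A minimal sketch with the same constants as the snippet's demo (2 << 31 equals 2**32):

def lcg(seed: int, a: int = 1664525, c: int = 1013904223, m: int = 2 << 31):
    # Yields the linear congruential sequence; deterministic for a fixed seed.
    while True:
        seed = (a * seed + c) % m
        yield seed

gen = lcg(seed=42)
print([next(gen) for _ in range(3)])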
import tensorflow as tf
from ...tf_utils import shape_list
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1 , _UpperCAmelCase=False , **_UpperCAmelCase ) -> Dict:
super().__init__(**_UpperCAmelCase )
UpperCamelCase_ = vocab_size
UpperCamelCase_ = d_embed
UpperCamelCase_ = d_proj
UpperCamelCase_ = cutoffs + [vocab_size]
UpperCamelCase_ = [0] + self.cutoffs
UpperCamelCase_ = div_val
UpperCamelCase_ = self.cutoffs[0]
UpperCamelCase_ = len(self.cutoffs ) - 1
UpperCamelCase_ = self.shortlist_size + self.n_clusters
UpperCamelCase_ = keep_order
UpperCamelCase_ = []
UpperCamelCase_ = []
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> str:
if self.n_clusters > 0:
UpperCamelCase_ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=_UpperCAmelCase , name='cluster_weight' )
UpperCamelCase_ = self.add_weight(
shape=(self.n_clusters,) , initializer='zeros' , trainable=_UpperCAmelCase , name='cluster_bias' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
UpperCamelCase_ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=_UpperCAmelCase , name=f"""out_projs_._{i}""" , )
self.out_projs.append(_UpperCAmelCase )
else:
self.out_projs.append(_UpperCAmelCase )
UpperCamelCase_ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=_UpperCAmelCase , name=f"""out_layers_._{i}_._weight""" , )
UpperCamelCase_ = self.add_weight(
shape=(self.vocab_size,) , initializer='zeros' , trainable=_UpperCAmelCase , name=f"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
UpperCamelCase_ , UpperCamelCase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase_ = self.d_embed // (self.div_val**i)
UpperCamelCase_ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=_UpperCAmelCase , name=f"""out_projs_._{i}""" )
self.out_projs.append(_UpperCAmelCase )
UpperCamelCase_ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=_UpperCAmelCase , name=f"""out_layers_._{i}_._weight""" , )
UpperCamelCase_ = self.add_weight(
shape=(r_idx - l_idx,) , initializer='zeros' , trainable=_UpperCAmelCase , name=f"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
super().build(_UpperCAmelCase )
@staticmethod
def _UpperCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ) -> Optional[Any]:
UpperCamelCase_ = x
if proj is not None:
UpperCamelCase_ = tf.einsum('ibd,ed->ibe' , _UpperCAmelCase , _UpperCAmelCase )
return tf.einsum('ibd,nd->ibn' , _UpperCAmelCase , _UpperCAmelCase ) + b
@staticmethod
def _UpperCAmelCase ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
UpperCamelCase_ = shape_list(_UpperCAmelCase )
UpperCamelCase_ = tf.range(lp_size[0] , dtype=target.dtype )
UpperCamelCase_ = tf.stack([r, target] , 1 )
return tf.gather_nd(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=True , _UpperCAmelCase=False ) -> Optional[Any]:
UpperCamelCase_ = 0
if self.n_clusters == 0:
UpperCamelCase_ = self._logit(_UpperCAmelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
UpperCamelCase_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=_UpperCAmelCase , logits=_UpperCAmelCase )
UpperCamelCase_ = tf.nn.log_softmax(_UpperCAmelCase , axis=-1 )
else:
UpperCamelCase_ = shape_list(_UpperCAmelCase )
UpperCamelCase_ = []
UpperCamelCase_ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
UpperCamelCase_ , UpperCamelCase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
UpperCamelCase_ = (target >= l_idx) & (target < r_idx)
UpperCamelCase_ = tf.where(_UpperCAmelCase )
UpperCamelCase_ = tf.boolean_mask(_UpperCAmelCase , _UpperCAmelCase ) - l_idx
if self.div_val == 1:
UpperCamelCase_ = self.out_layers[0][0][l_idx:r_idx]
UpperCamelCase_ = self.out_layers[0][1][l_idx:r_idx]
else:
UpperCamelCase_ = self.out_layers[i][0]
UpperCamelCase_ = self.out_layers[i][1]
if i == 0:
UpperCamelCase_ = tf.concat([cur_W, self.cluster_weight] , 0 )
UpperCamelCase_ = tf.concat([cur_b, self.cluster_bias] , 0 )
UpperCamelCase_ = self._logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , self.out_projs[0] )
UpperCamelCase_ = tf.nn.log_softmax(_UpperCAmelCase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
UpperCamelCase_ = tf.boolean_mask(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = self._gather_logprob(_UpperCAmelCase , _UpperCAmelCase )
else:
UpperCamelCase_ = self._logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , self.out_projs[i] )
UpperCamelCase_ = tf.nn.log_softmax(_UpperCAmelCase )
UpperCamelCase_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
UpperCamelCase_ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(_UpperCAmelCase )
if target is not None:
UpperCamelCase_ = tf.boolean_mask(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = tf.boolean_mask(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = self._gather_logprob(_UpperCAmelCase , _UpperCAmelCase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(_UpperCAmelCase , -cur_logprob , shape_list(_UpperCAmelCase ) )
UpperCamelCase_ = tf.concat(_UpperCAmelCase , axis=-1 )
if target is not None:
if return_mean:
UpperCamelCase_ = tf.reduce_mean(_UpperCAmelCase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(_UpperCAmelCase )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference).
self.add_metric(_UpperCAmelCase , name=self.name , aggregation='mean' if return_mean else '' )
return out
| 703 |
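The static _gather_logprob helper above picks, for each row r, the entry logprob[r, target[r]]. A standalone sketch of that row-wise gather with tf.gather_nd; the tensors are made up for illustration:

import tensorflow as tf

logprob = tf.math.log([[0.7, 0.3], [0.2, 0.8]])  # (batch=2, vocab=2)
target = tf.constant([0, 1])                     # one class id per row
rows = tf.range(tf.shape(logprob)[0])            # [0, 1]
picked = tf.gather_nd(logprob, tf.stack([rows, target], axis=1))
print(picked.numpy())  # [log(0.7), log(0.8)]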
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
snake_case__ : Optional[int] = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'])
def _snake_case (__lowercase , __lowercase):
inspect_dataset(__lowercase , __lowercase)
UpperCamelCase_ = path + '.py'
assert script_name in os.listdir(__lowercase)
assert "__pycache__" not in os.listdir(__lowercase)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning')
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
@pytest.mark.parametrize('path' , ['accuracy'])
def _snake_case (__lowercase , __lowercase):
inspect_metric(__lowercase , __lowercase)
UpperCamelCase_ = path + '.py'
assert script_name in os.listdir(__lowercase)
assert "__pycache__" not in os.listdir(__lowercase)
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def _snake_case (__lowercase , __lowercase , __lowercase):
UpperCamelCase_ = get_dataset_config_info(__lowercase , config_name=__lowercase)
assert info.config_name == config_name
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def _snake_case (__lowercase , __lowercase , __lowercase):
with pytest.raises(__lowercase):
get_dataset_config_info(__lowercase , config_name=__lowercase)
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def _snake_case (__lowercase , __lowercase):
UpperCamelCase_ = get_dataset_config_names(__lowercase)
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def _snake_case (__lowercase , __lowercase , __lowercase):
UpperCamelCase_ = get_dataset_infos(__lowercase)
assert list(infos.keys()) == expected_configs
UpperCamelCase_ = expected_configs[0]
assert expected_config in infos
UpperCamelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def _snake_case (__lowercase , __lowercase , __lowercase):
UpperCamelCase_ = get_dataset_infos(__lowercase)
assert expected_config in infos
UpperCamelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def _snake_case (__lowercase , __lowercase , __lowercase):
with pytest.raises(__lowercase):
get_dataset_split_names(__lowercase , config_name=__lowercase)
| 618 | 0 |
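A hedged usage sketch of the inspection helpers the parametrized tests above exercise; it needs network access, and the printed values reflect the current state of the Hub:

from datasets import get_dataset_config_names, get_dataset_split_names

configs = get_dataset_config_names("squad")
print(configs)  # e.g. ['plain_text']
print(get_dataset_split_names("squad", configs[0]))  # e.g. ['train', 'validation']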
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Optional[Any] = logging.get_logger(__name__)
def _snake_case (__lowercase):
UpperCamelCase_ = torch.load(__lowercase , map_location='cpu')
if "model" in sd.keys():
UpperCamelCase_ = sd['model']  # reuse the already-loaded checkpoint instead of reading the file twice
# pop unnecessary weights
UpperCamelCase_ = [
'decoder.version',
'decoder.output_projection.weight',
]
for key in keys_to_delete:
if key in sd:
sd.pop(__lowercase)
UpperCamelCase_ = {
'decoder.project_in_dim.weight': 'decoder.project_in.weight',
'decoder.project_out_dim.weight': 'decoder.project_out.weight',
'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
UpperCamelCase_ = sd.pop(__lowercase)
UpperCamelCase_ = list(sd.keys())
for key in keys:
if ".qkv_proj." in key:
UpperCamelCase_ = sd[key]
# We split QKV into separate Q, K, V
UpperCamelCase_ = key.replace('.qkv_proj.' , '.q_proj.')
UpperCamelCase_ = key.replace('.qkv_proj.' , '.k_proj.')
UpperCamelCase_ = key.replace('.qkv_proj.' , '.v_proj.')
UpperCamelCase_ = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` keeps its QKV weight separated as K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = torch.split(__lowercase , depth // 3 , dim=0)
UpperCamelCase_ = q
UpperCamelCase_ = k
UpperCamelCase_ = v
del sd[key]
return sd
@torch.no_grad()
def _snake_case (__lowercase , __lowercase , __lowercase=None):
UpperCamelCase_ = load_checkpoint(__lowercase)
if config is not None:
UpperCamelCase_ = OPTConfig.from_pretrained(__lowercase)
else:
UpperCamelCase_ = OPTConfig()
UpperCamelCase_ = OPTModel(__lowercase).half().eval()
model.load_state_dict(__lowercase)
# Check results
Path(__lowercase).mkdir(exist_ok=__lowercase)
model.save_pretrained(__lowercase)
if __name__ == "__main__":
snake_case__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
snake_case__ : Optional[Any] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 704 |
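The converter above splits each fused projection into three equal chunks along dim 0 (the source comment notes the fused layout is K,V,Q rather than Q,K,V). A minimal sketch of that split; the shapes are illustrative:

import torch

qkv = torch.randn(3 * 4, 8)  # fused weight: (3 * chunk, in_dim)
depth = qkv.shape[0]
assert depth % 3 == 0
q, k, v = torch.split(qkv, depth // 3, dim=0)  # three (4, 8) tensors
print(q.shape, k.shape, v.shape)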
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = parent
def _UpperCAmelCase ( self ) -> Optional[Any]:
return {}
def _snake_case ():
UpperCamelCase_ = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
UpperCamelCase_ = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
return [html_string_a, html_string_a]
@require_bsa
class _a ( UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = MarkupLMFeatureExtractor if is_bsa_available() else None
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = MarkupLMFeatureExtractionTester(self )
@property
def _UpperCAmelCase ( self ) -> Any:
return self.feature_extract_tester.prepare_feat_extract_dict()
def _UpperCAmelCase ( self ) -> Dict:
# Initialize feature_extractor
UpperCamelCase_ = self.feature_extraction_class()
# Test not batched input
UpperCamelCase_ = get_html_strings()[0]
UpperCamelCase_ = feature_extractor(_UpperCAmelCase )
# fmt: off
UpperCamelCase_ = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
UpperCamelCase_ = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
self.assertEqual(encoding.nodes , _UpperCAmelCase )
self.assertEqual(encoding.xpaths , _UpperCAmelCase )
# Test batched
UpperCamelCase_ = get_html_strings()
UpperCamelCase_ = feature_extractor(_UpperCAmelCase )
# fmt: off
UpperCamelCase_ = expected_nodes + [['My First Heading', 'My first paragraph.']]
UpperCamelCase_ = expected_xpaths + [['/html/body/h1', '/html/body/p']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , _UpperCAmelCase )
self.assertEqual(encoding.xpaths , _UpperCAmelCase )
| 618 | 0 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def _snake_case (__lowercase , __lowercase , __lowercase = None):
if version.parse(hfh.__version__).release < version.parse('0.11.0').release:
# old versions of hfh don't url-encode the file path
UpperCamelCase_ = quote(__lowercase)
return hfh.hf_hub_url(__lowercase , __lowercase , repo_type='dataset' , revision=__lowercase) | 705 |
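A sketch of the version gate above: the path is quoted manually only for huggingface_hub releases older than 0.11.0, which began url-encoding it internally. The repo id and file path here are made up:

from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version

path = "data/train file.csv"  # hypothetical file with a space in its name
if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
    path = quote(path)  # old hfh did not encode the file path itself
print(hfh.hf_hub_url("user/repo", path, repo_type="dataset"))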
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : str = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = """layoutlmv3"""
def __init__( self , _UpperCAmelCase=50265 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , _UpperCAmelCase=1024 , _UpperCAmelCase=128 , _UpperCAmelCase=128 , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=128 , _UpperCAmelCase=64 , _UpperCAmelCase=256 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=224 , _UpperCAmelCase=3 , _UpperCAmelCase=16 , _UpperCAmelCase=None , **_UpperCAmelCase , ) -> Tuple:
super().__init__(
vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
UpperCamelCase_ = max_ad_position_embeddings
UpperCamelCase_ = coordinate_size
UpperCamelCase_ = shape_size
UpperCamelCase_ = has_relative_attention_bias
UpperCamelCase_ = rel_pos_bins
UpperCamelCase_ = max_rel_pos
UpperCamelCase_ = has_spatial_attention_bias
UpperCamelCase_ = rel_ad_pos_bins
UpperCamelCase_ = max_rel_ad_pos
UpperCamelCase_ = text_embed
UpperCamelCase_ = visual_embed
UpperCamelCase_ = input_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = patch_size
UpperCamelCase_ = classifier_dropout
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = version.parse("""1.12""" )
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def _UpperCAmelCase ( self ) -> float:
return 1e-5
@property
def _UpperCAmelCase ( self ) -> int:
return 12
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = 3 , _UpperCAmelCase = 40 , _UpperCAmelCase = 40 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase_ = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase_ = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
UpperCamelCase_ = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase_ = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCamelCase_ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCamelCase_ = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs
| 618 | 0 |
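A short hedged sketch instantiating the default config defined above; the printed attributes match the constructor defaults shown in the snippet (this assumes a transformers install that ships LayoutLMv3):

from transformers import LayoutLMv3Config

config = LayoutLMv3Config()
print(config.vocab_size, config.hidden_size)  # 50265 768
print(config.input_size, config.patch_size)   # 224 16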
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = tempfile.mkdtemp()
UpperCamelCase_ = 8
# DPR tok
UpperCamelCase_ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
UpperCamelCase_ = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
UpperCamelCase_ = os.path.join(_UpperCAmelCase , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
UpperCamelCase_ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
UpperCamelCase_ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
UpperCamelCase_ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
UpperCamelCase_ = {'unk_token': '<unk>'}
UpperCamelCase_ = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
UpperCamelCase_ = os.path.join(_UpperCAmelCase , BART_VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase_ = os.path.join(_UpperCAmelCase , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def _UpperCAmelCase ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def _UpperCAmelCase ( self ) -> List[str]:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = os.path.join(self.tmpdirname , 'rag_tokenizer' )
UpperCamelCase_ = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
UpperCamelCase_ = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(_UpperCAmelCase )
rag_tokenizer.save_pretrained(_UpperCAmelCase )
UpperCamelCase_ = RagTokenizer.from_pretrained(_UpperCAmelCase , config=_UpperCAmelCase )
self.assertIsInstance(new_rag_tokenizer.question_encoder , _UpperCAmelCase )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , _UpperCAmelCase )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
UpperCamelCase_ = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
UpperCamelCase_ = tokenizer(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
UpperCamelCase_ = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
UpperCamelCase_ = tokenizer(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
| 706 |
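A hedged usage sketch of the pretrained path the slow tests above cover; it downloads weights from the Hub, so it is gated the same way the tests are:

from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
batch = tokenizer(["who got the first nobel prize in physics"])
print(sorted(batch.keys()))  # e.g. ['attention_mask', 'input_ids']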
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=30 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=None , ) -> Optional[Any]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase_ = (image_size // patch_size) ** 2
UpperCamelCase_ = num_patches + 1
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> int:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
UpperCamelCase_ = ViTMSNModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
UpperCamelCase_ = self.type_sequence_label_size
UpperCamelCase_ = ViTMSNForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
print(f"""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print(f"""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase_ = 1
UpperCamelCase_ = ViTMSNForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
A_ = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = ViTMSNModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Dict:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = ViTMSNModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _snake_case ():
UpperCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> List[str]:
torch.manual_seed(2 )
UpperCamelCase_ = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_UpperCAmelCase )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCamelCase_ = torch.tensor([-0.0_8_0_3, -0.4_4_5_4, -0.2_3_7_5] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
| 618 | 0 |
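A worked check of the sequence length the tester above derives: the patch count plus one [CLS] token.

image_size, patch_size = 30, 2                   # tester defaults above
seq_length = (image_size // patch_size) ** 2 + 1
print(seq_length)  # 226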
def _snake_case (__lowercase = 1000):
UpperCamelCase_ , UpperCamelCase_ = 1, 1
UpperCamelCase_ = 2
while True:
UpperCamelCase_ = 0
UpperCamelCase_ = fa + fa
UpperCamelCase_ , UpperCamelCase_ = fa, f
index += 1
for _ in str(__lowercase):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 707 |
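A de-obfuscated sketch of the digit-count search above (Project Euler 25): find the index of the first Fibonacci term with n decimal digits. The function name is illustrative.

def first_fib_index_with_digits(n: int = 1000) -> int:
    a, b, index = 1, 1, 2
    while len(str(b)) < n:  # count digits directly instead of looping over them
        a, b = b, a + b
        index += 1
    return index

assert first_fib_index_with_digits(3) == 12  # F(12) = 144 is the first 3-digit term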
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
snake_case__ : List[str] = logging.get_logger(__name__)
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> None:
warnings.warn(
'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use VideoMAEImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 618 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case__ : List[Any] = {
"""configuration_mobilenet_v2""": [
"""MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileNetV2Config""",
"""MobileNetV2OnnxConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : str = ["""MobileNetV2FeatureExtractor"""]
snake_case__ : List[Any] = ["""MobileNetV2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Union[str, Any] = [
"""MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileNetV2ForImageClassification""",
"""MobileNetV2ForSemanticSegmentation""",
"""MobileNetV2Model""",
"""MobileNetV2PreTrainedModel""",
"""load_tf_weights_in_mobilenet_v2""",
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
snake_case__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 708 |
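A minimal sketch of the lazy-import pattern _LazyModule implements above: attribute names map to submodules, and the import runs only on first access. LazyNamespace is a hypothetical stand-in, not the transformers class.

import importlib

class LazyNamespace:
    def __init__(self, name_to_module):
        self._map = name_to_module  # attribute name -> module path
    def __getattr__(self, name):
        module = importlib.import_module(self._map[name])  # deferred import
        return getattr(module, name)

ns = LazyNamespace({"OrderedDict": "collections"})
print(ns.OrderedDict)  # 'collections' is imported only at this point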
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 618 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
snake_case__ : Any = logging.get_logger(__name__)
snake_case__ : List[str] = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = """gptj"""
A_ = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , _UpperCAmelCase=50400 , _UpperCAmelCase=2048 , _UpperCAmelCase=4096 , _UpperCAmelCase=28 , _UpperCAmelCase=16 , _UpperCAmelCase=64 , _UpperCAmelCase=None , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=True , _UpperCAmelCase=50256 , _UpperCAmelCase=50256 , _UpperCAmelCase=False , **_UpperCAmelCase , ) -> Optional[int]:
UpperCamelCase_ = vocab_size
UpperCamelCase_ = n_positions
UpperCamelCase_ = n_embd
UpperCamelCase_ = n_layer
UpperCamelCase_ = n_head
UpperCamelCase_ = n_inner
UpperCamelCase_ = rotary_dim
UpperCamelCase_ = activation_function
UpperCamelCase_ = resid_pdrop
UpperCamelCase_ = embd_pdrop
UpperCamelCase_ = attn_pdrop
UpperCamelCase_ = layer_norm_epsilon
UpperCamelCase_ = initializer_range
UpperCamelCase_ = use_cache
UpperCamelCase_ = bos_token_id
UpperCamelCase_ = eos_token_id
super().__init__(
bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase )
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ) -> Union[str, Any]:
super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase )
if not getattr(self._config , 'pad_token_id' , _UpperCAmelCase ):
# TODO: how to do that better?
UpperCamelCase_ = 0
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
UpperCamelCase_ = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='inputs' )
UpperCamelCase_ = {0: 'batch', 1: 'past_sequence + sequence'}
else:
UpperCamelCase_ = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def _UpperCAmelCase ( self ) -> int:
return self._config.n_layer
@property
def _UpperCAmelCase ( self ) -> int:
return self._config.n_head
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ) -> Mapping[str, Any]:
UpperCamelCase_ = super(_UpperCAmelCase , self ).generate_dummy_inputs(
_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
# We need to order the inputs in the way they appear in the forward()
UpperCamelCase_ = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
UpperCamelCase_ , UpperCamelCase_ = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
UpperCamelCase_ = seqlen + 2
UpperCamelCase_ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
UpperCamelCase_ = [
(torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers )
]
UpperCamelCase_ = common_inputs['attention_mask']
if self.use_past:
UpperCamelCase_ = ordered_inputs['attention_mask'].dtype
UpperCamelCase_ = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def _UpperCAmelCase ( self ) -> int:
return 13
| 709 |
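A worked sketch of the past_key_values shape the dummy-input builder above allocates, using the GPT-J defaults from the config class (n_head=16, n_embd=4096):

batch, seqlen = 2, 8
n_head, n_embd = 16, 4096
past_len = seqlen + 2                        # the builder deliberately adds 2
shape = (batch, n_head, past_len, n_embd // n_head)
print(shape)  # (2, 16, 10, 256)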
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case__ : str = logging.getLogger(__name__)
@dataclass(frozen=UpperCAmelCase__ )
class _a :
"""simple docstring"""
A_ = 42
A_ = 42
A_ = None
A_ = None
A_ = None
@dataclass(frozen=UpperCAmelCase__ )
class _a :
"""simple docstring"""
A_ = 42
A_ = None
A_ = None
A_ = None
A_ = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = 42
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase=False , _UpperCAmelCase = False , ) -> Any:
UpperCamelCase_ = hans_processors[task]()
UpperCamelCase_ = os.path.join(
_UpperCAmelCase , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(_UpperCAmelCase ) , _UpperCAmelCase , ) , )
UpperCamelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase_ , UpperCamelCase_ = label_list[2], label_list[1]
UpperCamelCase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCamelCase_ = cached_features_file + '.lock'
with FileLock(_UpperCAmelCase ):
if os.path.exists(_UpperCAmelCase ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCamelCase_ = torch.load(_UpperCAmelCase )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCamelCase_ = (
processor.get_dev_examples(_UpperCAmelCase ) if evaluate else processor.get_train_examples(_UpperCAmelCase )
)
logger.info('Training examples: %s' , len(_UpperCAmelCase ) )
UpperCamelCase_ = hans_convert_examples_to_features(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
logger.info('Saving features into cached file %s' , _UpperCAmelCase )
torch.save(self.features , _UpperCAmelCase )
def __len__( self ) -> Dict:
return len(self.features )
def __getitem__( self , _UpperCAmelCase ) -> InputFeatures:
return self.features[i]
def _UpperCAmelCase ( self ) -> Dict:
return self.label_list
if is_tf_available():
import tensorflow as tf
class _a :
"""simple docstring"""
A_ = 42
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 128 , _UpperCAmelCase=False , _UpperCAmelCase = False , ) -> int:
UpperCamelCase_ = hans_processors[task]()
UpperCamelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase_ , UpperCamelCase_ = label_list[2], label_list[1]
UpperCamelCase_ = label_list
UpperCamelCase_ = processor.get_dev_examples(_UpperCAmelCase ) if evaluate else processor.get_train_examples(_UpperCAmelCase )
UpperCamelCase_ = hans_convert_examples_to_features(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(_UpperCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCamelCase_ = tf.data.Dataset.from_generator(
_UpperCAmelCase , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
    def get_dataset(self):
        return self.dataset

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
class HansProcessor(DataProcessor):
    """simple docstring"""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("ex") else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"""guid: {example}""")
        logger.info(f"""features: {features[i]}""")
    return features
hans_tasks_num_labels = {
    """hans""": 3,
}

hans_processors = {
    """hans""": HansProcessor,
}
| 618 | 0 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """simple docstring"""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """simple docstring"""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
    import torch
    from torch.utils.data import Dataset


class HansDataset(Dataset):
    """simple docstring"""

    features: List[InputFeatures]
    def __init__(self, data_dir, tokenizer, task, max_seq_length=None, overwrite_cache=False, evaluate=False):
        processor = hans_processors[task]()
        cached_features_file = os.path.join(
            data_dir,
            "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train",
                tokenizer.__class__.__name__,
                str(max_seq_length),
                task,
            ),
        )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"""Loading features from cached file {cached_features_file}""")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"""Creating features from dataset file at {data_dir}""")
                examples = (
                    processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                )
                logger.info("Training examples: %s", len(examples))
                self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(self.features, cached_features_file)
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
if is_tf_available():
    import tensorflow as tf


class TFHansDataset:
    """simple docstring"""

    features: List[InputFeatures]
    def __init__(self, data_dir, tokenizer, task, max_seq_length=128, overwrite_cache=False, evaluate=False):
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
        self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

        def gen():
            for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                if ex_index % 10000 == 0:
                    logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        self.dataset = tf.data.Dataset.from_generator(
            gen,
            (
                {
                    "example_id": tf.int32,
                    "input_ids": tf.int32,
                    "attention_mask": tf.int32,
                    "token_type_ids": tf.int32,
                },
                tf.int64,
            ),
            (
                {
                    "example_id": tf.TensorShape([]),
                    "input_ids": tf.TensorShape([None, None]),
                    "attention_mask": tf.TensorShape([None, None]),
                    "token_type_ids": tf.TensorShape([None, None]),
                },
                tf.TensorShape([]),
            ),
        )
    def get_dataset(self):
        return self.dataset

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
class HansProcessor(DataProcessor):
    """simple docstring"""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("ex") else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"""guid: {example}""")
        logger.info(f"""features: {features[i]}""")
    return features
hans_tasks_num_labels = {
    """hans""": 3,
}

hans_processors = {
    """hans""": HansProcessor,
}
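# A usage sketch for the classes above. The keyword names follow the restored signature; the
# tokenizer checkpoint and data directory are illustrative assumptions, not part of this file:
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   eval_dataset = HansDataset(
#       data_dir="./hans",  # must contain heuristics_train_set.txt / heuristics_evaluation_set.txt
#       tokenizer=tokenizer,
#       task="hans",
#       max_seq_length=128,
#       evaluate=True,
#   )
#   print(len(eval_dataset), eval_dataset.get_labels())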
| 710 |
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    # parameter and helper names restored from their un-obfuscated uses later in this file
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"""{var_name} must be a list"""
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"""{var_name} must be a list of strings"""
                raise ValueError(msg)


def _validate_dicts(initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"""{var_name} must be a dict"""
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"""{var_name} all keys must be strings"""
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"""{var_name} {nested_text}all values must be {value_type.__name__}"""
        raise ValueError(msg)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
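if __name__ == "__main__":
    # Worked example exercising the decoder above: the classic healthy/sick HMM (standard textbook
    # values, not from the original file). Expected decoded path: ['healthy', 'healthy', 'sick'].
    example_observations = ["normal", "cold", "dizzy"]
    example_states = ["healthy", "sick"]
    example_initial = {"healthy": 0.6, "sick": 0.4}
    example_transitions = {
        "healthy": {"healthy": 0.7, "sick": 0.3},
        "sick": {"healthy": 0.4, "sick": 0.6},
    }
    example_emissions = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(example_observations, example_states, example_initial, example_transitions, example_emissions))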
| 618 | 0 |
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    # name restored from the call sites below; returns x unchanged if it is already an
    # iterable (e.g. an (h, w) tuple), otherwise duplicates the scalar into a pair
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class _a :
"""simple docstring"""
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self ) -> Any:
pass
    def assert_almost_equals(self, a, b, tol) -> None:
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"""Difference between torch and flax is {diff} (>= {tol}).""")
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = VisionTextDualEncoderConfig.from_vision_text_configs(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = FlaxVisionTextDualEncoderModel(_UpperCAmelCase )
UpperCamelCase_ = model(input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ) -> Optional[int]:
UpperCamelCase_ , UpperCamelCase_ = self.get_vision_text_model(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = {'vision_model': vision_model, 'text_model': text_model}
UpperCamelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_UpperCAmelCase )
UpperCamelCase_ = model(input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ) -> Optional[int]:
UpperCamelCase_ , UpperCamelCase_ = self.get_vision_text_model(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = {'vision_model': vision_model, 'text_model': text_model}
UpperCamelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_UpperCAmelCase )
UpperCamelCase_ = model(input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
out_1 = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase )
UpperCamelCase_ = FlaxVisionTextDualEncoderModel.from_pretrained(_UpperCAmelCase )
UpperCamelCase_ = model(input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
out_2 = after_output[0]
max_diff = np.amax(np.abs(out_2 - out_1 ) )
self.assertLessEqual(max_diff , 1e-3 )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ) -> str:
UpperCamelCase_ , UpperCamelCase_ = self.get_vision_text_model(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = {'vision_model': vision_model, 'text_model': text_model}
UpperCamelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_UpperCAmelCase )
UpperCamelCase_ = model(
input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase , output_attentions=_UpperCAmelCase )
UpperCamelCase_ = output.vision_model_output.attentions
self.assertEqual(len(_UpperCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase_ = to_atuple(vision_model.config.image_size )
UpperCamelCase_ = to_atuple(vision_model.config.patch_size )
UpperCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCamelCase_ = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCamelCase_ = output.text_model_output.attentions
self.assertEqual(len(_UpperCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
pt_model.to(_UpperCAmelCase )
pt_model.eval()
# prepare inputs
UpperCamelCase_ = inputs_dict
UpperCamelCase_ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
UpperCamelCase_ = pt_model(**_UpperCAmelCase ).to_tuple()
UpperCamelCase_ = fx_model(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(fx_output , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_UpperCAmelCase )
UpperCamelCase_ = FlaxVisionTextDualEncoderModel.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
UpperCamelCase_ = fx_model_loaded(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(fx_output_loaded , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_UpperCAmelCase )
UpperCamelCase_ = VisionTextDualEncoderModel.from_pretrained(_UpperCAmelCase , from_flax=_UpperCAmelCase )
pt_model_loaded.to(_UpperCAmelCase )
pt_model_loaded.eval()
with torch.no_grad():
UpperCamelCase_ = pt_model_loaded(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(fx_output , pt_output_loaded.numpy() , 4e-2 )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = VisionTextDualEncoderConfig.from_vision_text_configs(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = VisionTextDualEncoderModel(_UpperCAmelCase )
UpperCamelCase_ = FlaxVisionTextDualEncoderModel(_UpperCAmelCase )
UpperCamelCase_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _UpperCAmelCase )
UpperCamelCase_ = fx_state
self.check_pt_flax_equivalence(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
UpperCamelCase_ = VisionTextDualEncoderConfig.from_vision_text_configs(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = VisionTextDualEncoderModel(_UpperCAmelCase )
UpperCamelCase_ = FlaxVisionTextDualEncoderModel(_UpperCAmelCase )
UpperCamelCase_ = load_flax_weights_in_pytorch_model(_UpperCAmelCase , fx_model.params )
self.check_pt_flax_equivalence(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = self.prepare_config_and_inputs()
self.check_save_load(**_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_UpperCAmelCase )
@is_pt_flax_cross_test
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ = config_inputs_dict.pop('vision_config' )
UpperCamelCase_ = config_inputs_dict.pop('text_config' )
UpperCamelCase_ = config_inputs_dict
self.check_equivalence_pt_to_flax(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
self.check_equivalence_flax_to_pt(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ , UpperCamelCase_ = self.get_pretrained_model_and_inputs()
UpperCamelCase_ = model_a(**_UpperCAmelCase )
out_1 = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_UpperCAmelCase )
UpperCamelCase_ = FlaxVisionTextDualEncoderModel.from_pretrained(_UpperCAmelCase )
UpperCamelCase_ = model_a(**_UpperCAmelCase )
out_2 = after_outputs[0]
max_diff = np.amax(np.abs(out_2 - out_1 ) )
self.assertLessEqual(max_diff , 1e-5 )
@require_flax
class _a ( UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=_UpperCAmelCase , text_from_pt=_UpperCAmelCase , )
UpperCamelCase_ = 13
UpperCamelCase_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCamelCase_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
UpperCamelCase_ = random_attention_mask([batch_size, 4] )
UpperCamelCase_ = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> int:
UpperCamelCase_ = FlaxViTModel(_UpperCAmelCase )
UpperCamelCase_ = FlaxBertModel(_UpperCAmelCase )
return vision_model, text_model
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = FlaxViTModelTester(self )
UpperCamelCase_ = FlaxBertModelTester(self )
UpperCamelCase_ = vit_model_tester.prepare_config_and_inputs()
UpperCamelCase_ = bert_model_tester.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ = vision_config_and_inputs
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class _a ( UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=_UpperCAmelCase , text_from_pt=_UpperCAmelCase , )
UpperCamelCase_ = 13
UpperCamelCase_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCamelCase_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
UpperCamelCase_ = random_attention_mask([batch_size, 4] )
UpperCamelCase_ = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> int:
UpperCamelCase_ = FlaxCLIPVisionModel(_UpperCAmelCase )
UpperCamelCase_ = FlaxBertModel(_UpperCAmelCase )
return vision_model, text_model
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = FlaxCLIPVisionModelTester(self )
UpperCamelCase_ = FlaxBertModelTester(self )
UpperCamelCase_ = clip_model_tester.prepare_config_and_inputs()
UpperCamelCase_ = bert_model_tester.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ = vision_config_and_inputs
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 )
UpperCamelCase_ = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
UpperCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
UpperCamelCase_ = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='np' )
UpperCamelCase_ = model(**_UpperCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
UpperCamelCase_ = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image , _UpperCAmelCase , atol=1e-3 ) )
| 711 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class _a ( unittest.TestCase ):
"""simple docstring"""
    # attribute names restored from their `self.`/`cls.` uses in the methods below
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")
@classmethod
def _UpperCAmelCase ( cls ) -> Union[str, Any]:
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def _UpperCAmelCase ( cls ) -> List[str]:
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def _UpperCAmelCase ( self ) -> Optional[Any]:
cmd = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def _UpperCAmelCase ( self ) -> Optional[int]:
for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
with self.subTest(config_file=_UpperCAmelCase ):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(_UpperCAmelCase ), self.test_file_path] , env=os.environ.copy() )
def _UpperCAmelCase ( self ) -> Tuple:
execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() )
class _a ( unittest.TestCase ):
"""simple docstring"""
    # attribute names restored from their `self.` uses in the methods below
    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_UpperCAmelCase )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
| 618 | 0 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
    bleu_data = json.load(f)
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
    # method and variable names below are restored from their un-obfuscated uses in this file
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
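# For context, `calculate_bleu` (imported from the examples' utils above) returns a dict such as
# {"bleu": 26.13}. A minimal sketch of such a helper, assuming sacrebleu is available -- this
# mirrors the transformers seq2seq example utilities and is an assumption, not part of this file:
#
#   from sacrebleu import corpus_bleu
#
#   def calculate_bleu(output_lns, refs_lns):
#       return {"bleu": round(corpus_bleu(output_lns, [refs_lns]).score, 4)}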
| 712 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """simple docstring"""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    """simple docstring"""

    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    """simple docstring"""

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"""Loading features from cached file {cached_features_file} [took %.3f s]""", time.time() - start
                )
            else:
                logger.info(f"""Creating features from dataset file at {args.data_dir}""")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"""
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
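# A usage sketch (assumes GLUE task data has been downloaded to `data_dir`; the checkpoint and
# paths are illustrative assumptions, not part of this file):
#
#   from transformers import AutoTokenizer
#
#   data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)
#   print(len(train_dataset), train_dataset.get_labels())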
| 618 | 0 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""flax""", """transformers"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class _a ( metaclass=UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""flax""", """transformers"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class _a ( metaclass=UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""flax""", """transformers"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class _a ( metaclass=UpperCAmelCase__ ):
"""simple docstring"""
A_ = ["""flax""", """transformers"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
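# Behavior sketch: the four classes above are auto-generated import placeholders (the obfuscation
# collapsed their distinct pipeline class names into `_a`). With flax/transformers missing, any use
# fails loudly (the checkpoint string here is illustrative, not part of this file):
#
#   pipe = _a()                             # raises ImportError via requires_backends(...)
#   _a.from_pretrained("some/checkpoint")   # classmethods raise the same backend error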
| 713 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    # names restored from their un-obfuscated uses in this file (e.g. the calls below)
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
| 618 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )
        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """
                            f"""The value `text_config_dict[\"{key}\"]` will be used instead."""
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """
                            f"""value `text_config[\"{key}\"]` will be overriden."""
                        )
                    logger.warning(message)
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """
                            f"""values. The value `vision_config_dict[\"{key}\"]` will be used instead."""
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """
                            f"""The value `vision_config[\"{key}\"]` will be overriden."""
                        )
                    logger.warning(message)
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")
        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
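# A usage sketch for the restored config classes (the round-trip shown follows standard
# PretrainedConfig semantics; shown as an illustration, not part of the original file):
#
#   text_cfg = AltCLIPTextConfig()        # XLM-R-large-sized text tower defaults
#   vision_cfg = AltCLIPVisionConfig()    # ViT-style vision tower defaults
#   config = AltCLIPConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   assert config.projection_dim == 768
#   serialized = config.to_dict()         # nests the text_config / vision_config dicts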
| 714 |
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
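# Worked example (not part of the original file): 144 is the 12th Fibonacci term and the first
# with three digits, so:
#
#   >>> solution(3)
#   12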
| 618 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCamelCase_ = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
UpperCamelCase_ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='np' )
UpperCamelCase_ = processor(images=_UpperCAmelCase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = 'lower newer'
UpperCamelCase_ = processor(text=_UpperCAmelCase )
UpperCamelCase_ = tokenizer(_UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = 'lower newer'
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(_UpperCAmelCase ):
processor()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_ = processor.batch_decode(_UpperCAmelCase )
UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = 'lower newer'
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 715 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
snake_case__ : Optional[Any] = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
snake_case__ : List[str] = {
"""google/realm-cc-news-pretrained-embedder""": 5_1_2,
"""google/realm-cc-news-pretrained-encoder""": 5_1_2,
"""google/realm-cc-news-pretrained-scorer""": 5_1_2,
"""google/realm-cc-news-pretrained-openqa""": 5_1_2,
"""google/realm-orqa-nq-openqa""": 5_1_2,
"""google/realm-orqa-nq-reader""": 5_1_2,
"""google/realm-orqa-wq-openqa""": 5_1_2,
"""google/realm-orqa-wq-reader""": 5_1_2,
}
snake_case__ : Tuple = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_INIT_CONFIGURATION
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = RealmTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase="[UNK]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[PAD]" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ) -> Optional[Any]:
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenize_chinese_chars=_UpperCAmelCase , strip_accents=_UpperCAmelCase , **_UpperCAmelCase , )
UpperCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCAmelCase ) != tokenize_chinese_chars
):
UpperCamelCase_ = getattr(_UpperCAmelCase , normalizer_state.pop('type' ) )
UpperCamelCase_ = do_lower_case
UpperCamelCase_ = strip_accents
UpperCamelCase_ = tokenize_chinese_chars
UpperCamelCase_ = normalizer_class(**_UpperCAmelCase )
UpperCamelCase_ = do_lower_case
def _UpperCAmelCase ( self , _UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]:
UpperCamelCase_ = PaddingStrategy.MAX_LENGTH
UpperCamelCase_ = text
UpperCamelCase_ = kwargs.pop('text_pair' , _UpperCAmelCase )
UpperCamelCase_ = kwargs.pop('return_tensors' , _UpperCAmelCase )
UpperCamelCase_ = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(_UpperCAmelCase ):
if batch_text_pair is not None:
UpperCamelCase_ = batch_text_pair[idx]
else:
UpperCamelCase_ = None
UpperCamelCase_ = super().__call__(_UpperCAmelCase , _UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
UpperCamelCase_ = encoded_candidates.get('input_ids' )
UpperCamelCase_ = encoded_candidates.get('attention_mask' )
UpperCamelCase_ = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(_UpperCAmelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(_UpperCAmelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(_UpperCAmelCase )
UpperCamelCase_ = {key: item for key, item in output_data.items() if len(_UpperCAmelCase ) != 0}
return BatchEncoding(_UpperCAmelCase , tensor_type=_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=None ) -> Union[str, Any]:
UpperCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> List[int]:
UpperCamelCase_ = [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> Tuple[str]:
UpperCamelCase_ = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
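# Hedged usage sketch (added for illustration; not part of the original file).
# The __call__-style override above pads every candidate to `max_length` and
# stacks the per-example encodings along a new candidate axis. Assuming the
# class `_a` above is the fast REALM tokenizer and the checkpoint can be
# downloaded:
#
#   tokenizer = _a.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch_text = [["hello world", "nice to meet you"], ["the cat", "the dog"]]
#   encoded = tokenizer(batch_text, max_length=10, return_tensors="np")
#   encoded["input_ids"].shape  # (num_examples, num_candidates, max_length)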
| 618 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example of using Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset in a distributed system; it builds on the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multiple GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
snake_case__ : int = 1_6
snake_case__ : int = 3_2
def _snake_case (__lowercase , __lowercase = 16):
UpperCamelCase_ = AutoTokenizer.from_pretrained('bert-base-cased')
UpperCamelCase_ = load_dataset('glue' , 'mrpc')
def tokenize_function(__lowercase):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase_ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__lowercase , max_length=__lowercase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase_ = datasets.map(
__lowercase , batched=__lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels', which is the name the models of the
    # transformers library expect for labels
UpperCamelCase_ = tokenized_datasets.rename_column('label' , 'labels')
def collate_fn(__lowercase):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase_ = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want to pad to round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase_ = 16
elif accelerator.mixed_precision != "no":
UpperCamelCase_ = 8
else:
UpperCamelCase_ = None
return tokenizer.pad(
__lowercase , padding='longest' , max_length=__lowercase , pad_to_multiple_of=__lowercase , return_tensors='pt' , )
# Instantiate dataloaders.
UpperCamelCase_ = DataLoader(
tokenized_datasets['train'] , shuffle=__lowercase , collate_fn=__lowercase , batch_size=__lowercase)
UpperCamelCase_ = DataLoader(
tokenized_datasets['validation'] , shuffle=__lowercase , collate_fn=__lowercase , batch_size=__lowercase)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
snake_case__ : Union[str, Any] = mocked_dataloaders # noqa: F811
def _snake_case (__lowercase , __lowercase):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , __lowercase) == "1":
UpperCamelCase_ = 2
# Initialize accelerator
UpperCamelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase_ = config['lr']
UpperCamelCase_ = int(config['num_epochs'])
UpperCamelCase_ = int(config['seed'])
UpperCamelCase_ = int(config['batch_size'])
UpperCamelCase_ = evaluate.load('glue' , 'mrpc')
# If the batch size is too big we use gradient accumulation
UpperCamelCase_ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCamelCase_ = batch_size // MAX_GPU_BATCH_SIZE
UpperCamelCase_ = MAX_GPU_BATCH_SIZE
set_seed(__lowercase)
UpperCamelCase_ , UpperCamelCase_ = get_dataloaders(__lowercase , __lowercase)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase_ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__lowercase)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase_ = model.to(accelerator.device)
# Instantiate optimizer
UpperCamelCase_ = AdamW(params=model.parameters() , lr=__lowercase)
# Instantiate scheduler
UpperCamelCase_ = get_linear_schedule_with_warmup(
optimizer=__lowercase , num_warmup_steps=100 , num_training_steps=(len(__lowercase) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
# Now we train the model
for epoch in range(__lowercase):
model.train()
for step, batch in enumerate(__lowercase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
UpperCamelCase_ = model(**__lowercase)
UpperCamelCase_ = outputs.loss
UpperCamelCase_ = loss / gradient_accumulation_steps
accelerator.backward(__lowercase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
UpperCamelCase_ = 0
for step, batch in enumerate(__lowercase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
UpperCamelCase_ = model(**__lowercase)
UpperCamelCase_ = outputs.logits.argmax(dim=-1)
UpperCamelCase_ , UpperCamelCase_ = accelerator.gather((predictions, batch['labels']))
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(__lowercase) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
UpperCamelCase_ = predictions[: len(eval_dataloader.dataset) - samples_seen]
UpperCamelCase_ = references[: len(eval_dataloader.dataset) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=__lowercase , references=__lowercase , )
UpperCamelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowercase)
def _snake_case ():
UpperCamelCase_ = argparse.ArgumentParser(description='Simple example of training script.')
parser.add_argument(
        '--mixed_precision' , type=__lowercase , default=__lowercase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.')
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(__lowercase , __lowercase)
if __name__ == "__main__":
main()
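# Hedged usage note (illustrative; not part of the original script): the
# example above is normally launched through the Accelerate CLI so the same
# file runs unchanged on CPU, multiple GPUs, or TPU, e.g.:
#
#   accelerate config                                   # answer the prompts once
#   accelerate launch this_script.py --mixed_precision fp16
#
# where `this_script.py` is a placeholder for whatever name the file is saved under.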
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
snake_case__ : List[str] = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[str] = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Any = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
snake_case__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
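# Hedged usage note (illustrative; not part of the original file): the
# `_LazyModule` above defers the heavy torch/sentencepiece imports until an
# attribute is actually accessed, so importing the package stays cheap and
# each submodule is only loaded on first use, e.g. (assuming the package is
# installed as `transformers`):
#
#   from transformers.models.speecht5 import SpeechT5Processor  # loads only the processing submodule
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")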
| 618 | 0 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=30 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.0_2 , ) -> List[str]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase_ = (image_size // patch_size) ** 2
UpperCamelCase_ = num_patches + 1
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, pixel_values
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = FlaxViTModel(config=_UpperCAmelCase )
UpperCamelCase_ = model(_UpperCAmelCase )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase_ = (self.image_size, self.image_size)
UpperCamelCase_ = (self.patch_size, self.patch_size)
UpperCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> str:
UpperCamelCase_ = self.type_sequence_label_size
UpperCamelCase_ = FlaxViTForImageClassification(config=_UpperCAmelCase )
UpperCamelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase_ = 1
UpperCamelCase_ = FlaxViTForImageClassification(_UpperCAmelCase )
UpperCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_ = model(_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = self.prepare_config_and_inputs()
        UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class _a ( UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def _UpperCAmelCase ( self ) -> None:
UpperCamelCase_ = FlaxViTModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = model_class(_UpperCAmelCase )
@jax.jit
def model_jitted(_UpperCAmelCase , **_UpperCAmelCase ):
return model(pixel_values=_UpperCAmelCase , **_UpperCAmelCase )
with self.subTest('JIT Enabled' ):
UpperCamelCase_ = model_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase_ = model_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
UpperCamelCase_ = model_class_name.from_pretrained('google/vit-base-patch16-224' )
UpperCamelCase_ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(_UpperCAmelCase )
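# Hedged note (illustrative; not part of the original tests): `jax.jit`
# traces `model_jitted` once per input shape and reuses the compiled function
# afterwards, which is what the "JIT Enabled"/"JIT Disabled" subtests compare.
# A minimal standalone analogue:
#
#   import jax
#   import jax.numpy as jnp
#   doubled = jax.jit(lambda x: x * 2)
#   assert bool((doubled(jnp.ones((2,))) == 2.0).all())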
| 717 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) )
class _a :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=64 , _UpperCAmelCase=3 , _UpperCAmelCase=3 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=16 , _UpperCAmelCase=[128, 256, 384] , _UpperCAmelCase=[4, 6, 8] , _UpperCAmelCase=[2, 3, 4] , _UpperCAmelCase=[16, 16, 16] , _UpperCAmelCase=0 , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=2 , ) -> int:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = kernel_size
UpperCamelCase_ = stride
UpperCamelCase_ = padding
UpperCamelCase_ = hidden_sizes
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = depths
UpperCamelCase_ = key_dim
UpperCamelCase_ = drop_path_rate
UpperCamelCase_ = patch_size
UpperCamelCase_ = attention_ratio
UpperCamelCase_ = mlp_ratio
UpperCamelCase_ = initializer_range
UpperCamelCase_ = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = num_labels
UpperCamelCase_ = initializer_range
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase_ = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> str:
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
UpperCamelCase_ = LevitModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase )
UpperCamelCase_ = (self.image_size, self.image_size)
UpperCamelCase_ , UpperCamelCase_ = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase_ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
UpperCamelCase_ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = LevitForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
A_ = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
A_ = False
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = LevitModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> Any:
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> str:
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def _UpperCAmelCase ( self ) -> Dict:
pass
@unittest.skip(reason='Levit does not output attentions' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCamelCase_ = outputs.hidden_states
UpperCamelCase_ = len(self.model_tester.depths ) + 1
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
UpperCamelCase_ = (self.model_tester.image_size, self.model_tester.image_size)
UpperCamelCase_ , UpperCamelCase_ = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase_ = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
UpperCamelCase_ = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Tuple:
UpperCamelCase_ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
if not self.model_tester.is_training:
return
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = True
for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference only
if (
model_class in get_values(_UpperCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase_ = False
UpperCamelCase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
            # LevitForImageClassificationWithTeacher supports inference only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_UpperCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type["title"]}""" ):
UpperCamelCase_ = problem_type['title']
UpperCamelCase_ = problem_type['num_labels']
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if problem_type["num_labels"] > 1:
UpperCamelCase_ = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
UpperCamelCase_ = inputs['labels'].to(problem_type['dtype'] )
                # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                # they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_UpperCAmelCase ) as warning_list:
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def _UpperCAmelCase ( self ) -> List[str]:
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = LevitModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _snake_case ():
UpperCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCAmelCase ( self ) -> str:
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_UpperCAmelCase )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCamelCase_ = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
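# Hedged note (illustrative; not part of the original tests): the repeated
# height/width reduction in the tests above is the standard convolution
# output-size formula,
#
#   out = floor((in + 2 * padding - kernel_size) / stride) + 1
#
# With the tester defaults (image_size=64, kernel_size=3, stride=2, padding=1)
# one spatial side shrinks as 64 -> 32 -> 16 -> 8 -> 4 across the four
# applications.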
| 618 | 0 |
def _snake_case (__lowercase):
if len(__lowercase) < 2:
return collection
def circle_sort_util(__lowercase , __lowercase , __lowercase) -> bool:
UpperCamelCase_ = False
if low == high:
return swapped
UpperCamelCase_ = low
UpperCamelCase_ = high
while left < right:
if collection[left] > collection[right]:
UpperCamelCase_ , UpperCamelCase_ = (
collection[right],
collection[left],
)
UpperCamelCase_ = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
UpperCamelCase_ , UpperCamelCase_ = (
collection[right + 1],
collection[left],
)
UpperCamelCase_ = True
UpperCamelCase_ = low + int((high - low) / 2)
UpperCamelCase_ = circle_sort_util(__lowercase , __lowercase , __lowercase)
UpperCamelCase_ = circle_sort_util(__lowercase , mid + 1 , __lowercase)
return swapped or left_swap or right_swap
UpperCamelCase_ = True
while is_not_sorted is True:
UpperCamelCase_ = circle_sort_util(__lowercase , 0 , len(__lowercase) - 1)
return collection
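# Hedged example (illustrative; not part of the original file): each pass
# swaps mirrored pairs (first/last, second/second-to-last, ...) and then
# recurses on both halves, repeating whole passes until one completes with no
# swap:
#
#   circle_sort([5, 3, 1, 4, 2])  # -> [1, 2, 3, 4, 5]
#   circle_sort([])               # -> []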
if __name__ == "__main__":
snake_case__ : int = input("""Enter numbers separated by a comma:\n""").strip()
snake_case__ : List[Any] = [int(item) for item in user_input.split(""",""")]
print(circle_sort(unsorted))
| 718 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Optional[int] = logging.get_logger(__name__)
snake_case__ : List[Any] = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = """git_vision_model"""
def __init__( self , _UpperCAmelCase=768 , _UpperCAmelCase=3072 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3 , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase="quick_gelu" , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0_2 , **_UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**_UpperCAmelCase )
UpperCamelCase_ = hidden_size
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = num_channels
UpperCamelCase_ = patch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = attention_dropout
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = hidden_act
@classmethod
def _UpperCAmelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCamelCase_ , UpperCamelCase_ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('model_type' ) == "git":
UpperCamelCase_ = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = """git"""
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=30522 , _UpperCAmelCase=768 , _UpperCAmelCase=6 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1024 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=101 , _UpperCAmelCase=102 , _UpperCAmelCase=None , **_UpperCAmelCase , ) -> Optional[int]:
super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
if vision_config is None:
UpperCamelCase_ = {}
logger.info('vision_config is None. initializing the GitVisionConfig with default values.' )
UpperCamelCase_ = GitVisionConfig(**_UpperCAmelCase )
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = hidden_act
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = initializer_range
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = position_embedding_type
UpperCamelCase_ = use_cache
UpperCamelCase_ = tie_word_embeddings
UpperCamelCase_ = num_image_with_embedding
UpperCamelCase_ = bos_token_id
UpperCamelCase_ = eos_token_id
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = copy.deepcopy(self.__dict__ )
UpperCamelCase_ = self.vision_config.to_dict()
UpperCamelCase_ = self.__class__.model_type
return output
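# Hedged usage sketch (illustrative; not part of the original file). Both
# classes above are named `_a` in this dump; assuming they are exposed under
# their usual names GitVisionConfig and GitConfig, composition works like:
#
#   config = GitConfig(vision_config={"hidden_size": 512}, num_hidden_layers=4)
#   config.vision_config.hidden_size   # -> 512 (dict wrapped in GitVisionConfig)
#   config.to_dict()["vision_config"]  # serialized back to a plain dict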
| 618 | 0 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case__ : Optional[Any] = logging.get_logger(__name__)
snake_case__ : str = {"""vocab_file""": """spiece.model"""}
snake_case__ : List[Any] = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
snake_case__ : Optional[int] = {
"""google/bigbird-roberta-base""": 4_0_9_6,
"""google/bigbird-roberta-large""": 4_0_9_6,
"""google/bigbird-base-trivia-itc""": 4_0_9_6,
}
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ["""input_ids""", """attention_mask"""]
A_ = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> None:
UpperCamelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token
UpperCamelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token
UpperCamelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
UpperCamelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token
UpperCamelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token
UpperCamelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
UpperCamelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
UpperCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
UpperCamelCase_ = vocab_file
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCAmelCase )
@property
def _UpperCAmelCase ( self ) -> Tuple:
return self.sp_model.get_piece_size()
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Any:
UpperCamelCase_ = self.__dict__.copy()
UpperCamelCase_ = None
return state
def __setstate__( self , _UpperCAmelCase ) -> Optional[Any]:
UpperCamelCase_ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase_ = {}
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Union[str, Any]:
return self.sp_model.piece_to_id(_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[Any]:
UpperCamelCase_ = self.sp_model.IdToPiece(_UpperCAmelCase )
return token
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[Any]:
UpperCamelCase_ = []
UpperCamelCase_ = ''
UpperCamelCase_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCAmelCase ) + token
UpperCamelCase_ = True
UpperCamelCase_ = []
else:
current_sub_tokens.append(_UpperCAmelCase )
UpperCamelCase_ = False
out_string += self.sp_model.decode(_UpperCAmelCase )
return out_string.strip()
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = True , **_UpperCAmelCase , ) -> str:
UpperCamelCase_ = kwargs.pop('use_source_tokenizer' , _UpperCAmelCase )
UpperCamelCase_ = self.convert_ids_to_tokens(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build the string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCamelCase_ = []
UpperCamelCase_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_UpperCAmelCase ) )
UpperCamelCase_ = []
sub_texts.append(_UpperCAmelCase )
else:
current_sub_text.append(_UpperCAmelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_UpperCAmelCase ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
UpperCamelCase_ = re.sub(R' (\[(MASK|SEP)\])' , R'\1' , ' '.join(_UpperCAmelCase ) )
else:
UpperCamelCase_ = ''.join(_UpperCAmelCase )
UpperCamelCase_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCamelCase_ = self.clean_up_tokenization(_UpperCAmelCase )
return clean_text
else:
return text
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase_ = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
UpperCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
UpperCamelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1]
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> List[int]:
UpperCamelCase_ = [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
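# Hedged illustration (not part of the original file): for a sentence pair the
# two methods above lay out the special tokens and segment ids as
#
#   tokens:         [CLS] a_1 ... a_n [SEP] b_1 ... b_m [SEP]
#   token_type_ids:   0    0  ...  0    0    1  ...  1    1
#
# while a single sequence gets [CLS] ... [SEP] with all-zero token type ids.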
| 719 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = tempfile.mkdtemp()
# fmt: off
UpperCamelCase_ = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
UpperCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
UpperCamelCase_ = {
'do_resize': True,
'size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
UpperCamelCase_ = os.path.join(self.tmpdirname , _UpperCAmelCase )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> Optional[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> Optional[int]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase_ = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCamelCase_ = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
UpperCamelCase_ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='np' )
UpperCamelCase_ = processor(images=_UpperCAmelCase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = 'lower newer'
UpperCamelCase_ = processor(text=_UpperCAmelCase )
UpperCamelCase_ = tokenizer(_UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = 'lower newer'
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(_UpperCAmelCase ):
processor()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_ = processor.batch_decode(_UpperCAmelCase )
UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCamelCase_ = 'lower newer'
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
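# Hedged usage sketch (illustrative; not part of the original tests): outside
# the test harness the processor bundles the tokenizer and image processor
# behind a single __call__; assuming a checkpoint previously saved with
# `save_pretrained` to a directory such as "my-dual-encoder" (placeholder):
#
#   processor = VisionTextDualEncoderProcessor.from_pretrained("my-dual-encoder")
#   batch = processor(text=["a photo of a cat"], images=images, return_tensors="pt")
#   # batch holds input_ids, token_type_ids, attention_mask and pixel_values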
| 618 | 0 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _a :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=30 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=2 , ) -> Tuple:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = scope
UpperCamelCase_ = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCamelCase_ = (image_size // patch_size) ** 2
UpperCamelCase_ = num_patches + 2
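        # worked example with the defaults above: image_size=30 and patch_size=2 give
        # (30 // 2) ** 2 = 225 patches, so seq_length = 225 + 2 = 227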
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Any:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
UpperCamelCase_ = DeiTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
UpperCamelCase_ = DeiTForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase_ = 1
UpperCamelCase_ = DeiTForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
UpperCamelCase_ = self.type_sequence_label_size
UpperCamelCase_ = DeiTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase_ = 1
UpperCamelCase_ = DeiTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.prepare_config_and_inputs()
        UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A_ = (
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = DeiTModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> List[str]:
UpperCamelCase_ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _UpperCAmelCase ( self ) -> Optional[Any]:
if not self.model_tester.is_training:
return
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_UpperCAmelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase_ = False
UpperCamelCase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_UpperCAmelCase ),
*get_values(_UpperCAmelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type["title"]}""" ):
UpperCamelCase_ = problem_type['title']
UpperCamelCase_ = problem_type['num_labels']
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if problem_type["num_labels"] > 1:
UpperCamelCase_ = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
UpperCamelCase_ = inputs['labels'].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_UpperCAmelCase ) as warning_list:
UpperCamelCase_ = model(**_UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = DeiTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCAmelCase ( self ) -> List[Any]:
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
_UpperCAmelCase )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCamelCase_ = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='pt' )
UpperCamelCase_ = inputs.pixel_values.to(_UpperCAmelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCamelCase_ = model(_UpperCAmelCase )
| 720 |
def valid_coloring(neighbours , colored_vertices , color):
    # a vertex may take `color` only if no already-colored neighbour uses it
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours))
def util_color(graph , max_colors , colored_vertices , index):
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index] , colored_vertices , i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph , max_colors):
    colored_vertices = [-1] * len(graph)
    if util_color(graph , max_colors , colored_vertices , 0):
        return colored_vertices
    return []
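A quick driver for the backtracking colorer above; the 5-vertex adjacency matrix and the choice of 3 colors are arbitrary illustration values:

graph = [
    [0, 1, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
    [0, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
]
# one color index per vertex, or [] if no valid coloring exists
print(color(graph, 3))  # [0, 1, 0, 1, 0]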
| 618 | 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
snake_case__ : str = logging.get_logger(__name__)
snake_case__ : Union[str, Any] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
snake_case__ : str = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , __lowercase):
for attribute in key.split('.'):
UpperCamelCase_ = getattr(__lowercase , __lowercase)
if weight_type is not None:
UpperCamelCase_ = getattr(__lowercase , __lowercase).shape
else:
UpperCamelCase_ = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
UpperCamelCase_ = value
elif weight_type == "weight_g":
UpperCamelCase_ = value
elif weight_type == "weight_v":
UpperCamelCase_ = value
elif weight_type == "bias":
UpperCamelCase_ = value
else:
UpperCamelCase_ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def _snake_case (__lowercase , __lowercase):
UpperCamelCase_ = []
UpperCamelCase_ = fairseq_model.state_dict()
UpperCamelCase_ = hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase_ = False
if "conv_layers" in name:
load_conv_layer(
__lowercase , __lowercase , __lowercase , __lowercase , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
UpperCamelCase_ = True
if "*" in mapped_key:
UpperCamelCase_ = name.split(__lowercase)[0].split('.')[-2]
UpperCamelCase_ = mapped_key.replace('*' , __lowercase)
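                    # e.g. name "encoder.layers.3.self_attn.k_proj.weight" yields layer index "3",
                    # so mapped_key becomes "encoder.layers.3.attention.k_proj"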
if "weight_g" in name:
UpperCamelCase_ = 'weight_g'
elif "weight_v" in name:
UpperCamelCase_ = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
UpperCamelCase_ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase_ = 'weight'
else:
UpperCamelCase_ = None
set_recursively(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
continue
if not is_used:
unused_weights.append(__lowercase)
logger.warning(f"""Unused weights: {unused_weights}""")
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , __lowercase):
UpperCamelCase_ = full_name.split('conv_layers.')[-1]
UpperCamelCase_ = name.split('.')
UpperCamelCase_ = int(items[0])
UpperCamelCase_ = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
UpperCamelCase_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
UpperCamelCase_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
UpperCamelCase_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
UpperCamelCase_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
else:
unused_weights.append(__lowercase)
@torch.no_grad()
def _snake_case (__lowercase , __lowercase , __lowercase=None):
# load the pre-trained checkpoints
UpperCamelCase_ = torch.load(__lowercase)
UpperCamelCase_ = WavLMConfigOrig(checkpoint['cfg'])
UpperCamelCase_ = WavLMOrig(__lowercase)
model.load_state_dict(checkpoint['model'])
model.eval()
if config_path is not None:
UpperCamelCase_ = WavLMConfig.from_pretrained(__lowercase)
else:
UpperCamelCase_ = WavLMConfig()
UpperCamelCase_ = WavLMModel(__lowercase)
recursively_load_weights(__lowercase , __lowercase)
hf_wavlm.save_pretrained(__lowercase)
if __name__ == "__main__":
snake_case__ : str = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
snake_case__ : str = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
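# Example invocation (the script name and checkpoint paths below are placeholders):
#   python convert_wavlm_checkpoint.py --checkpoint_path ./WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base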
| 721 |
import os
from math import log10
def solution(data_file="base_exp.txt"):
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__) , data_file))):
        a , x = list(map(int , line.split(',')))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
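The comparison in solution never evaluates the huge powers themselves: a**x > b**y exactly when x * log10(a) > y * log10(b), so each line of the file costs one float multiply instead of a big-integer exponentiation. A standalone check of that identity:

from math import log10

# 2**11 = 2048 vs 3**7 = 2187: the log comparison agrees with the direct one
assert (11 * log10(2) > 7 * log10(3)) == (2**11 > 3**7)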
| 618 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[int] =logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] ={
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = """gpt_neox_japanese"""
def __init__( self , __lowercase=3_2_0_0_0 , __lowercase=2_5_6_0 , __lowercase=3_2 , __lowercase=3_2 , __lowercase=4 , __lowercase="gelu" , __lowercase=1.00 , __lowercase=1_0_0_0_0 , __lowercase=2_0_4_8 , __lowercase=0.02 , __lowercase=1e-5 , __lowercase=True , __lowercase=3_1_9_9_6 , __lowercase=3_1_9_9_9 , __lowercase=0.1 , __lowercase=0.0 , **__lowercase , ) -> str:
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
lowerCAmelCase_ : Optional[Any] = vocab_size
lowerCAmelCase_ : Tuple = max_position_embeddings
lowerCAmelCase_ : Optional[Any] = hidden_size
lowerCAmelCase_ : Optional[Any] = num_hidden_layers
lowerCAmelCase_ : str = num_attention_heads
lowerCAmelCase_ : str = intermediate_multiple_size
lowerCAmelCase_ : str = hidden_act
lowerCAmelCase_ : Dict = rotary_pct
lowerCAmelCase_ : Union[str, Any] = rotary_emb_base
lowerCAmelCase_ : int = initializer_range
lowerCAmelCase_ : Any = layer_norm_eps
lowerCAmelCase_ : Optional[Any] = use_cache
lowerCAmelCase_ : Tuple = attention_dropout
lowerCAmelCase_ : Dict = hidden_dropout | 619 |
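For orientation, a short usage sketch; it assumes the class ships as transformers.GPTNeoXJapaneseConfig, and the printed defaults are read off the signature above:

from transformers import GPTNeoXJapaneseConfig

config = GPTNeoXJapaneseConfig()  # defaults: vocab_size=32000, hidden_size=2560, ...
print(config.hidden_size, config.num_hidden_layers)  # 2560 32
config.save_pretrained("./gpt-neox-japanese")  # writes ./gpt-neox-japanese/config.json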
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : Union[str, Any] ={
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[Any] =["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] =["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple =[
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
_UpperCAmelCase : List[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 619 | 1 |
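The try/except blocks above register optional symbols only when their backend is installed, and _LazyModule defers the actual imports until an attribute is first accessed. A stripped-down sketch of that mechanism (illustrative only, not the real _LazyModule, which also handles __dir__, pickling and more):

import importlib
import types

class MiniLazyModule(types.ModuleType):
    """Resolve attributes from submodules on first access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)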
class SubArray:
    '''simple docstring'''
    def __init__( self , arr ) -> None:
        # the input arrives as a comma-separated string, so split it into a list
        self.array = arr.split(''',''' )
    def solve_sub_array( self ) -> int:
        # Kadane's algorithm: sum_value[i] is the best subarray sum ending at index i,
        # rear[i] is the best subarray sum seen anywhere in array[0..i]
        rear = [int(self.array[0] )] * len(self.array )
        sum_value = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            # either extend the best subarray ending at i - 1 or start fresh at i
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            rear[i] = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]
if __name__ == "__main__":
    whole_array = input("""please input some numbers:""")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
print(("""the results is:""", re)) | 619 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_UpperCAmelCase : Any =False
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase_ ( self ) -> Union[str, Any]:
return 1_2
@property
def lowercase_ ( self ) -> Any:
return 1_2
@property
def lowercase_ ( self ) -> Optional[Any]:
return 3_2
@property
def lowercase_ ( self ) -> int:
torch.manual_seed(0 )
lowerCAmelCase_ : Any = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def lowercase_ ( self ) -> int:
torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(__lowercase )
@property
def lowercase_ ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = 1_2
lowerCAmelCase_ : int = 1_2
lowerCAmelCase_ : Union[str, Any] = {
'''attention_bias''': True,
'''cross_attention_dim''': 3_2,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 3_2,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
lowerCAmelCase_ : List[str] = TransformeraDModel(**__lowercase )
return model
def lowercase_ ( self ) -> str:
lowerCAmelCase_ : List[Any] = '''cpu'''
lowerCAmelCase_ : Any = self.dummy_vqvae
lowerCAmelCase_ : str = self.dummy_text_encoder
lowerCAmelCase_ : Union[str, Any] = self.dummy_tokenizer
lowerCAmelCase_ : int = self.dummy_transformer
lowerCAmelCase_ : List[str] = VQDiffusionScheduler(self.num_embed )
lowerCAmelCase_ : Union[str, Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=__lowercase )
lowerCAmelCase_ : Dict = VQDiffusionPipeline(
vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
lowerCAmelCase_ : int = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
lowerCAmelCase_ : Any = '''teddy bear playing in the pool'''
lowerCAmelCase_ : int = torch.Generator(device=__lowercase ).manual_seed(0 )
lowerCAmelCase_ : Tuple = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' )
lowerCAmelCase_ : Union[str, Any] = output.images
lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
lowerCAmelCase_ : List[Any] = pipe(
[prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0]
lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
lowerCAmelCase_ : Optional[int] = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : Optional[Any] = '''cpu'''
lowerCAmelCase_ : str = self.dummy_vqvae
lowerCAmelCase_ : Dict = self.dummy_text_encoder
lowerCAmelCase_ : List[Any] = self.dummy_tokenizer
lowerCAmelCase_ : Union[str, Any] = self.dummy_transformer
lowerCAmelCase_ : Tuple = VQDiffusionScheduler(self.num_embed )
lowerCAmelCase_ : str = LearnedClassifierFreeSamplingEmbeddings(
learnable=__lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
lowerCAmelCase_ : List[str] = VQDiffusionPipeline(
vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
lowerCAmelCase_ : Union[str, Any] = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
lowerCAmelCase_ : Any = '''teddy bear playing in the pool'''
lowerCAmelCase_ : List[str] = torch.Generator(device=__lowercase ).manual_seed(0 )
lowerCAmelCase_ : Dict = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' )
lowerCAmelCase_ : str = output.images
lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = pipe(
[prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0]
lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
lowerCAmelCase_ : Union[str, Any] = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
lowerCAmelCase_ : str = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
lowerCAmelCase_ : List[Any] = pipeline.to(__lowercase )
pipeline.set_progress_bar_config(disable=__lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
lowerCAmelCase_ : Optional[int] = pipeline(
'''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=__lowercase , output_type='''np''' , )
lowerCAmelCase_ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0 | 619 | 1 |
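For reference, the slow test above mirrors end-user usage of the pipeline. A hedged standalone sketch (the model id is taken from the test; a CUDA device is assumed):

import torch
from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq").to("cuda")
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe("teddy bear playing in the pool", generator=generator, output_type="np").images[0]
print(image.shape)  # (256, 256, 3), matching the assertion in the test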
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
_UpperCAmelCase : List[Any] ={
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """speech_to_text"""
SCREAMING_SNAKE_CASE__ : Any = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , __lowercase=1_0_0_0_0 , __lowercase=1_2 , __lowercase=2_0_4_8 , __lowercase=4 , __lowercase=6 , __lowercase=2_0_4_8 , __lowercase=4 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=True , __lowercase=True , __lowercase="relu" , __lowercase=2_5_6 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=2 , __lowercase=True , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase=6_0_0_0 , __lowercase=1_0_2_4 , __lowercase=2 , __lowercase=(5, 5) , __lowercase=1_0_2_4 , __lowercase=8_0 , __lowercase=1 , **__lowercase , ) -> List[Any]:
lowerCAmelCase_ : str = vocab_size
lowerCAmelCase_ : Tuple = d_model
lowerCAmelCase_ : Union[str, Any] = encoder_ffn_dim
lowerCAmelCase_ : Any = encoder_layers
lowerCAmelCase_ : Any = encoder_attention_heads
lowerCAmelCase_ : List[str] = decoder_ffn_dim
lowerCAmelCase_ : str = decoder_layers
lowerCAmelCase_ : List[Any] = decoder_attention_heads
lowerCAmelCase_ : int = dropout
lowerCAmelCase_ : Tuple = attention_dropout
lowerCAmelCase_ : int = activation_dropout
lowerCAmelCase_ : Dict = activation_function
lowerCAmelCase_ : Tuple = init_std
lowerCAmelCase_ : List[str] = encoder_layerdrop
lowerCAmelCase_ : Optional[int] = decoder_layerdrop
lowerCAmelCase_ : Dict = use_cache
lowerCAmelCase_ : Union[str, Any] = encoder_layers
lowerCAmelCase_ : Any = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase_ : Dict = max_source_positions
lowerCAmelCase_ : Dict = max_target_positions
lowerCAmelCase_ : Tuple = num_conv_layers
lowerCAmelCase_ : int = list(__lowercase )
lowerCAmelCase_ : Any = conv_channels
lowerCAmelCase_ : Optional[int] = input_feat_per_channel
lowerCAmelCase_ : Any = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '''
f"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
f"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
super().__init__(
pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , is_encoder_decoder=__lowercase , decoder_start_token_id=__lowercase , **__lowercase , ) | 619 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
_UpperCAmelCase : Dict =None
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
_UpperCAmelCase : Any ={"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Any ={
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
_UpperCAmelCase : Dict ={
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
_UpperCAmelCase : Tuple ="""▁"""
# Segments (not really needed)
_UpperCAmelCase : str =0
_UpperCAmelCase : List[str] =1
_UpperCAmelCase : int =2
_UpperCAmelCase : Any =3
_UpperCAmelCase : List[Any] =4
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Any = """left"""
SCREAMING_SNAKE_CASE__ : List[Any] = XLNetTokenizer
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<sep>" , __lowercase="<pad>" , __lowercase="<cls>" , __lowercase="<mask>" , __lowercase=["<eop>", "<eod>"] , **__lowercase , ) -> List[Any]:
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCAmelCase_ : Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
super().__init__(
vocab_file=__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , **__lowercase , )
lowerCAmelCase_ : List[Any] = 3
lowerCAmelCase_ : Dict = do_lower_case
lowerCAmelCase_ : Dict = remove_space
lowerCAmelCase_ : List[str] = keep_accents
lowerCAmelCase_ : List[str] = vocab_file
lowerCAmelCase_ : str = False if not self.vocab_file else True
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : Tuple = [self.sep_token_id]
lowerCAmelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
lowerCAmelCase_ : List[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase_ : str = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
copyfile(self.vocab_file , __lowercase )
return (out_vocab_file,) | 619 | 1 |
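Concretely, the two helpers above implement XLNet's back-loaded layout: a single sequence becomes `A <sep> <cls>` and a pair becomes `A <sep> B <sep> <cls>`, with token type ids 0 for A, 1 for B, and the special CLS segment id 2 last. A sketch against the real tokenizer (assumes the xlnet-base-cased checkpoint can be downloaded):

from transformers import XLNetTokenizerFast

tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
enc = tok("Hello", "world")
print(tok.convert_ids_to_tokens(enc["input_ids"])[-1])  # '<cls>' comes last
print(enc["token_type_ids"])  # e.g. [0, 0, 1, 1, 2]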
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Any =logging.get_logger(__name__)
_UpperCAmelCase : List[Any] ={
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = """speech_to_text_2"""
SCREAMING_SNAKE_CASE__ : int = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : Any = {"""num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , __lowercase=1_0_0_0_0 , __lowercase=6 , __lowercase=2_0_4_8 , __lowercase=4 , __lowercase=0.0 , __lowercase=True , __lowercase="relu" , __lowercase=2_5_6 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=2 , __lowercase=True , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase=1_0_2_4 , **__lowercase , ) -> str:
lowerCAmelCase_ : Union[str, Any] = vocab_size
lowerCAmelCase_ : List[Any] = d_model
lowerCAmelCase_ : Any = decoder_ffn_dim
lowerCAmelCase_ : Any = decoder_layers
lowerCAmelCase_ : str = decoder_attention_heads
lowerCAmelCase_ : Any = dropout
lowerCAmelCase_ : Union[str, Any] = attention_dropout
lowerCAmelCase_ : Optional[int] = activation_dropout
lowerCAmelCase_ : Dict = activation_function
lowerCAmelCase_ : str = init_std
lowerCAmelCase_ : Tuple = decoder_layerdrop
lowerCAmelCase_ : Union[str, Any] = use_cache
lowerCAmelCase_ : Tuple = decoder_layers
lowerCAmelCase_ : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase_ : List[str] = max_target_positions
super().__init__(
pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , decoder_start_token_id=__lowercase , **__lowercase , ) | 619 |
import math
import qiskit
def quantum_full_adder(input_a = 1 , input_b = 1 , carry_in = 1 )-> qiskit.result.counts.Counts:
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('''inputs must be integers.''' )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''' )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers
    qr = qiskit.QuantumRegister(4 , '''qr''' )
    cr = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i ) # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i ) # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i ) # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr ) # measure the last two qubits
    backend = qiskit.Aer.get_backend('''aer_simulator''' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""") | 619 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_UpperCAmelCase : Optional[int] =logging.get_logger(__name__)
_UpperCAmelCase : Any ={
"""Helsinki-NLP/opus-mt-en-de""": """https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json""",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """marian"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , __lowercase=5_8_1_0_1 , __lowercase=None , __lowercase=1_0_2_4 , __lowercase=1_2 , __lowercase=4_0_9_6 , __lowercase=1_6 , __lowercase=1_2 , __lowercase=4_0_9_6 , __lowercase=1_6 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=True , __lowercase=True , __lowercase="gelu" , __lowercase=1_0_2_4 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=5_8_1_0_0 , __lowercase=False , __lowercase=5_8_1_0_0 , __lowercase=0 , __lowercase=0 , __lowercase=True , **__lowercase , ) -> List[str]:
lowerCAmelCase_ : Union[str, Any] = vocab_size
lowerCAmelCase_ : Union[str, Any] = decoder_vocab_size or vocab_size
lowerCAmelCase_ : Optional[Any] = max_position_embeddings
lowerCAmelCase_ : Union[str, Any] = d_model
lowerCAmelCase_ : List[Any] = encoder_ffn_dim
lowerCAmelCase_ : Optional[int] = encoder_layers
lowerCAmelCase_ : Union[str, Any] = encoder_attention_heads
lowerCAmelCase_ : List[str] = decoder_ffn_dim
lowerCAmelCase_ : Optional[int] = decoder_layers
lowerCAmelCase_ : Union[str, Any] = decoder_attention_heads
lowerCAmelCase_ : Union[str, Any] = dropout
lowerCAmelCase_ : str = attention_dropout
lowerCAmelCase_ : Any = activation_dropout
lowerCAmelCase_ : Union[str, Any] = activation_function
lowerCAmelCase_ : Optional[Any] = init_std
lowerCAmelCase_ : Optional[int] = encoder_layerdrop
lowerCAmelCase_ : Tuple = decoder_layerdrop
lowerCAmelCase_ : str = use_cache
lowerCAmelCase_ : Union[str, Any] = encoder_layers
lowerCAmelCase_ : Any = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase_ : Any = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__lowercase , eos_token_id=__lowercase , is_encoder_decoder=__lowercase , decoder_start_token_id=__lowercase , forced_eos_token_id=__lowercase , **__lowercase , )
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : List[str] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCAmelCase_ : Tuple = {0: '''batch'''}
lowerCAmelCase_ : Dict = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowerCAmelCase_ : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''}
lowerCAmelCase_ : Dict = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__lowercase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCAmelCase_ : int = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCAmelCase_ , lowerCAmelCase_ : int = self.num_layers
for i in range(__lowercase ):
lowerCAmelCase_ : str = {0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCAmelCase_ : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
lowerCAmelCase_ : Optional[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : int = super().outputs
else:
lowerCAmelCase_ : Dict = super(__lowercase , self ).outputs
if self.use_past:
lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.num_layers
for i in range(__lowercase ):
lowerCAmelCase_ : Tuple = {0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCAmelCase_ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def lowercase_ ( self , __lowercase , __lowercase = -1 , __lowercase = -1 , __lowercase = False , __lowercase = None , ) -> Mapping[str, Any]:
lowerCAmelCase_ : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
# Generate decoder inputs
lowerCAmelCase_ : int = seq_length if not self.use_past else 1
lowerCAmelCase_ : List[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
lowerCAmelCase_ : Union[str, Any] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
lowerCAmelCase_ : Any = dict(**__lowercase , **__lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCAmelCase_ , lowerCAmelCase_ : str = common_inputs['''input_ids'''].shape
lowerCAmelCase_ : List[str] = common_inputs['''decoder_input_ids'''].shape[1]
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.num_attention_heads
lowerCAmelCase_ : Any = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase_ : int = decoder_seq_length + 3
lowerCAmelCase_ : List[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCAmelCase_ : Tuple = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__lowercase , __lowercase )] , dim=1 )
lowerCAmelCase_ : Optional[int] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.num_layers
lowerCAmelCase_ : List[str] = min(__lowercase , __lowercase )
lowerCAmelCase_ : Optional[int] = max(__lowercase , __lowercase ) - min_num_layers
lowerCAmelCase_ : List[str] = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__lowercase ):
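                # each appended entry holds four zero tensors per layer:
                # (self-attn key, self-attn value, cross-attn key, cross-attn value),
                # self-attention caches with the decoder shape, cross-attention caches with the encoder shape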
common_inputs["past_key_values"].append(
(
torch.zeros(__lowercase ),
torch.zeros(__lowercase ),
torch.zeros(__lowercase ),
torch.zeros(__lowercase ),
) )
# TODO: test this.
lowerCAmelCase_ : Union[str, Any] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__lowercase , __lowercase ):
common_inputs["past_key_values"].append((torch.zeros(__lowercase ), torch.zeros(__lowercase )) )
return common_inputs
def lowercase_ ( self , __lowercase , __lowercase = -1 , __lowercase = -1 , __lowercase = False , __lowercase = None , ) -> Mapping[str, Any]:
lowerCAmelCase_ : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCAmelCase_ , lowerCAmelCase_ : Dict = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowerCAmelCase_ : Optional[int] = seqlen + 2
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.num_layers
lowerCAmelCase_ , lowerCAmelCase_ : int = self.num_attention_heads
lowerCAmelCase_ : Tuple = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase_ : List[Any] = common_inputs['''attention_mask'''].dtype
lowerCAmelCase_ : str = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__lowercase , __lowercase , dtype=__lowercase )] , dim=1 )
lowerCAmelCase_ : Tuple = [
(torch.zeros(__lowercase ), torch.zeros(__lowercase )) for _ in range(__lowercase )
]
return common_inputs
def lowercase_ ( self , __lowercase , __lowercase = -1 , __lowercase = -1 , __lowercase = False , __lowercase = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase_ : Dict = compute_effective_axis_dimension(
__lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase_ : Optional[int] = tokenizer.num_special_tokens_to_add(__lowercase )
lowerCAmelCase_ : int = compute_effective_axis_dimension(
__lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowercase )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase_ : Optional[int] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCAmelCase_ : Optional[int] = dict(tokenizer(__lowercase , return_tensors=__lowercase ) )
return common_inputs
def lowercase_ ( self , __lowercase , __lowercase = -1 , __lowercase = -1 , __lowercase = False , __lowercase = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__lowercase , batch_size=__lowercase , seq_length=__lowercase , is_pair=__lowercase , framework=__lowercase )
else:
lowerCAmelCase_ : Dict = self._generate_dummy_inputs_for_causal_lm(
__lowercase , batch_size=__lowercase , seq_length=__lowercase , is_pair=__lowercase , framework=__lowercase )
return common_inputs
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : Optional[Any] = super()._flatten_past_key_values_(__lowercase , __lowercase , __lowercase , __lowercase )
else:
lowerCAmelCase_ : Optional[Any] = super(__lowercase , self )._flatten_past_key_values_(
__lowercase , __lowercase , __lowercase , __lowercase )
@property
def lowercase_ ( self ) -> float:
return 1e-4 | 619 |
import re
def indian_phone_validator(phone )-> bool:
    pat = re.compile(r'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("""+918827897895""")) | 619 | 1 |
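A few checks of the pattern above: it accepts an optional "+91" prefix (optionally followed by a space or hyphen), an optional leading 0 or 91, then a 10-digit number starting with 7, 8 or 9:

assert indian_phone_validator("+918827897895")
assert indian_phone_validator("9821765432")
assert not indian_phone_validator("123456789")  # too short and starts with 1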
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations )-> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x , y ) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
        for _ in range(iterations ) )
    # The ratio of the area of the circle to the square is pi/4.
    pi_estimate = proportion * 4
    print(f"""The estimated value of pi is {pi_estimate}""" )
    print(f"""The value of math.pi is {pi}""" )
    print(f"""The total error is {abs(pi - pi_estimate )}""" )
def area_under_curve_estimator(iterations , function_to_integrate: Callable[[float], float] , min_value = 0.0 , max_value = 1.0 , )-> float:
    return mean(
        function_to_integrate(uniform(min_value , max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)
def area_under_line_estimator_check(iterations , min_value = 0.0 , max_value = 1.0 )-> None:
    def identity_function(x ) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('''******************''' )
    print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
    print(f"""Estimated value is {estimated_value}""" )
    print(f"""Expected value is {expected_value}""" )
    print(f"""Total error is {abs(estimated_value - expected_value )}""" )
    print('''******************''' )
def pi_estimator_using_area_under_curve(iterations )-> None:
    def function_to_integrate(x ) -> float:
        return sqrt(4.0 - x * x )
    estimated_value = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0 )
    print('''******************''' )
    print('''Estimating pi using area_under_curve_estimator''' )
    print(f"""Estimated value is {estimated_value}""" )
    print(f"""Expected value is {pi}""" )
    print(f"""Total error is {abs(estimated_value - pi )}""" )
    print('''******************''' )
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 |
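To exercise the three estimators above (the sample count is arbitrary; printed values vary run to run because the sampling is random):

pi_estimator(100_000)                         # Monte Carlo estimate of pi from dart throws
area_under_line_estimator_check(100_000)      # integral of y=x on [0, 1], expected value 0.5
pi_estimator_using_area_under_curve(100_000)  # integrates sqrt(4 - x**2) on [0, 2], expected pi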
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCAmelCase : Any =logging.get_logger(__name__)
class ConvNextImageProcessor( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize = True , size = None , crop_pct = None , resample = PILImageResampling.BILINEAR , do_rescale = True , rescale_factor = 1 / 2_5_5 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 3_8_4}
        size = get_size_dict(size , default_to_square=False )
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , crop_pct , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
        shortest_edge = size['''shortest_edge''']
        if shortest_edge < 3_8_4:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct )
            resize_size = get_resize_output_image_size(image , size=resize_shortest_edge , default_to_square=False )
            image = resize(image=image , size=resize_size , resample=resample , data_format=data_format , **kwargs )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image , size=(shortest_edge, shortest_edge) , data_format=data_format , **kwargs )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image , size=(shortest_edge, shortest_edge) , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , crop_pct = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
            raise ValueError('''crop_pct must be specified if size < 384.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors ) | 619 | 1 |
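To make the shortest-edge/crop_pct branch in `resize` concrete, here is a standalone sketch of the arithmetic (plain Python, no transformers dependency; `resize_plan` is an invented helper name):

def resize_plan(shortest_edge: int, crop_pct: float) -> str:
    # mirrors the two branches of ConvNextImageProcessor.resize above
    if shortest_edge < 384:
        resize_edge = int(shortest_edge / crop_pct)
        return f"resize shortest side to {resize_edge}, then center-crop to {shortest_edge}x{shortest_edge}"
    return f"warp directly to {shortest_edge}x{shortest_edge} (no crop)"

print(resize_plan(224, 224 / 256))  # resize to 256, crop to 224x224
print(resize_plan(384, 224 / 256))  # warp to 384x384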
import math
import qiskit
def quantum_full_adder( input_a = 1 , input_b = 1 , carry_in = 1 )-> qiskit.result.counts.Counts:
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('''inputs must be integers.''' )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''' )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers
    qr = qiskit.QuantumRegister(4 , '''qr''' )
    cr = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i ) # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i ) # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i ) # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr ) # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""") | 619 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[int] =logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] ={
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class GPTNeoXJapaneseConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """gpt_neox_japanese"""
    def __init__( self , vocab_size=3_2_0_0_0 , hidden_size=2_5_6_0 , num_hidden_layers=3_2 , num_attention_heads=3_2 , intermediate_multiple_size=4 , hidden_act="gelu" , rotary_pct=1.00 , rotary_emb_base=1_0_0_0_0 , max_position_embeddings=2_0_4_8 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=3_1_9_9_6 , eos_token_id=3_1_9_9_9 , attention_dropout=0.1 , hidden_dropout=0.0 , **kwargs , ) -> None:
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout | 619 | 1 |
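A hedged usage sketch, assuming the class above is exported as transformers' GPTNeoXJapaneseConfig:

from transformers import GPTNeoXJapaneseConfig

config = GPTNeoXJapaneseConfig()              # defaults as listed above
print(config.vocab_size, config.hidden_size)  # 32000 2560
tiny = GPTNeoXJapaneseConfig(num_hidden_layers=2, hidden_size=128)  # handy for unit tests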
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class AcceleratedScheduler:
    '''simple docstring'''
    def __init__( self , scheduler , optimizers , step_with_optimizer = True , split_batches = False ):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers , (list, tuple) ) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step( self , *args , **kwargs ):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args , **kwargs )
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args , **kwargs )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , '''total_steps''' ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args , **kwargs )
                else:
                    self.scheduler.step(*args , **kwargs )
    def get_last_lr( self ):
        return self.scheduler.get_last_lr()
    def state_dict( self ):
        return self.scheduler.state_dict()
    def load_state_dict( self , state_dict ):
        self.scheduler.load_state_dict(state_dict )
    def get_lr( self ):
        return self.scheduler.get_lr()
    def print_lr( self , *args , **kwargs ):
        return self.scheduler.print_lr(*args , **kwargs ) | 619 |
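The else-branch of `step` advances the wrapped scheduler once per process. A standalone sketch of that rule with a plain PyTorch scheduler (no accelerate required; `num_processes` stands in for `AcceleratorState().num_processes`):

import torch

model = torch.nn.Linear(4, 4)
opt = torch.optim.SGD(model.parameters(), lr=1.0)
sched = torch.optim.lr_scheduler.StepLR(opt, step_size=4, gamma=0.5)
num_processes = 2  # stand-in for AcceleratorState().num_processes
for _ in range(4):  # four "training" steps
    opt.step()
    for _ in range(num_processes):  # mirrors the else-branch above
        sched.step()
print(opt.param_groups[0]["lr"])  # 0.25: the schedule advanced 8 steps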
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=1_0 , __lowercase=3 , __lowercase=3_2 * 4 , __lowercase=3_2 * 6 , __lowercase=4 , __lowercase=3_2 , ) -> Union[str, Any]:
lowerCAmelCase_ : str = parent
lowerCAmelCase_ : Optional[Any] = batch_size
lowerCAmelCase_ : List[Any] = is_training
lowerCAmelCase_ : Optional[Any] = use_auxiliary_loss
lowerCAmelCase_ : List[Any] = num_queries
lowerCAmelCase_ : str = num_channels
lowerCAmelCase_ : Dict = min_size
lowerCAmelCase_ : List[str] = max_size
lowerCAmelCase_ : Any = num_labels
lowerCAmelCase_ : str = mask_feature_size
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowercase )
lowerCAmelCase_ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase )
lowerCAmelCase_ : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5
).float()
lowerCAmelCase_ : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long()
lowerCAmelCase_ : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase_ ( self ) -> List[str]:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = self.prepare_config_and_inputs()
lowerCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
    def check_output_hidden_state( self , output , config ) -> Any:
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertEqual(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(transformer_decoder_hidden_states ) , config.decoder_config.decoder_layers )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> int:
with torch.no_grad():
lowerCAmelCase_ : List[Any] = MaskFormerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase )
lowerCAmelCase_ : Optional[int] = model(__lowercase , output_hidden_states=__lowercase )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowercase , __lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
lowerCAmelCase_ : Any = MaskFormerForInstanceSegmentation(config=__lowercase )
model.to(__lowercase )
model.eval()
def comm_check_on_output(__lowercase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCAmelCase_ : int = model(pixel_values=__lowercase , pixel_mask=__lowercase )
lowerCAmelCase_ : Any = model(__lowercase )
comm_check_on_output(__lowercase )
lowerCAmelCase_ : List[Any] = model(
pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
comm_check_on_output(__lowercase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Tuple = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : List[str] = False
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Any = MaskFormerModelTester(self )
lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def lowercase_ ( self ) -> Any:
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def lowercase_ ( self ) -> str:
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def lowercase_ ( self ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase_ ( self ) -> Dict:
pass
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Tuple = model_class(__lowercase )
lowerCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : str = [*signature.parameters.keys()]
lowerCAmelCase_ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
@slow
def lowercase_ ( self ) -> Optional[int]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowerCAmelCase_ : str = MaskFormerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Tuple = (self.model_tester.min_size,) * 2
lowerCAmelCase_ : List[Any] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__lowercase ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowercase ),
'''class_labels''': torch.zeros(2 , 1_0 , device=__lowercase ).long(),
}
lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase )
lowerCAmelCase_ : Dict = model(**__lowercase )
self.assertTrue(outputs.loss is not None )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )
def lowercase_ ( self ) -> int:
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : List[str] = model_class(__lowercase ).to(__lowercase )
lowerCAmelCase_ : int = model(**__lowercase , output_attentions=__lowercase )
self.assertTrue(outputs.attentions is not None )
def lowercase_ ( self ) -> List[str]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase_ : int = self.all_model_classes[1]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ : Optional[Any] = model_class(__lowercase )
model.to(__lowercase )
model.train()
lowerCAmelCase_ : Optional[Any] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss
loss.backward()
def lowercase_ ( self ) -> Optional[int]:
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase_ : Any = self.all_model_classes[1]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ : Tuple = True
lowerCAmelCase_ : Tuple = True
lowerCAmelCase_ : Any = model_class(__lowercase )
model.to(__lowercase )
model.train()
lowerCAmelCase_ : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
lowerCAmelCase_ : str = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase_ : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowercase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_UpperCAmelCase : Dict =1E-4
def lowerCAmelCase ( )-> Any:
lowerCAmelCase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class MaskFormerModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase_ ( self ) -> Union[str, Any]:
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase )
lowerCAmelCase_ : Dict = self.default_image_processor
lowerCAmelCase_ : int = prepare_img()
lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCAmelCase_ : List[str] = model(**__lowercase )
lowerCAmelCase_ : Union[str, Any] = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
lowerCAmelCase_ : List[Any] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
lowerCAmelCase_ : int = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowercase )
.eval()
)
lowerCAmelCase_ : Tuple = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCAmelCase_ : Dict = model(**__lowercase )
# masks_queries_logits
lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCAmelCase_ : Tuple = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
# class_queries_logits
lowerCAmelCase_ : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase_ : Dict = torch.tensor(
[
[1.6_512e00, -5.2_572e00, -3.3_519e00],
[3.6_169e-02, -5.9_025e00, -2.9_313e00],
[1.0_766e-04, -7.7_630e00, -5.1_263e00],
] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : str = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__lowercase )
.eval()
)
lowerCAmelCase_ : int = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCAmelCase_ : str = model(**__lowercase )
# masks_queries_logits
lowerCAmelCase_ : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
# class_queries_logits
lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase_ : int = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowercase )
.eval()
)
lowerCAmelCase_ : str = self.default_image_processor
lowerCAmelCase_ : Union[str, Any] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase )
lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']]
lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']]
with torch.no_grad():
lowerCAmelCase_ : str = model(**__lowercase )
self.assertTrue(outputs.loss is not None ) | 619 | 1 |
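For reference, a hedged sketch of the inference flow these integration tests exercise (checkpoint name and fixture path are taken verbatim from the tests; downloading the checkpoint needs network access):

import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# (1, num_queries, num_labels + 1) and (1, num_queries, H // 4, W // 4)
print(outputs.class_queries_logits.shape, outputs.masks_queries_logits.shape)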
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness( check_program , timeout , task_id , completion_id )-> dict:
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def unsafe_execute( check_program , result , timeout ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    exec(check_program , exec_globals )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(f"""failed: {e}""" )
# Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit( seconds ):
    def signal_handler(signum , frame ):
        raise TimeoutException('''Timed out!''' )
    signal.setitimer(signal.ITIMER_REAL , seconds )
    signal.signal(signal.SIGALRM , signal_handler )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def swallow_io( ):
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream ):
        with contextlib.redirect_stderr(stream ):
            with redirect_stdin(stream ):
yield
@contextlib.contextmanager
def create_tempdir( ):
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname ):
yield dirname
class TimeoutException( Exception ):
    '''simple docstring'''
    pass
class WriteOnlyStringIO( io.StringIO ):
    '''simple docstring'''
    def read( self , *args , **kwargs ):
        raise OSError
    def readline( self , *args , **kwargs ):
        raise OSError
    def readlines( self , *args , **kwargs ):
        raise OSError
    def readable( self , *args , **kwargs ):
        return False
class redirect_stdin( contextlib._RedirectStream ): # type: ignore
    '''simple docstring'''
    _stream = """stdin"""
@contextlib.contextmanager
def chdir( root ):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd )
def reliability_guard( maximum_memory_bytes=None ):
    if maximum_memory_bytes is not None:
        import resource
        resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
    faulthandler.disable()
    import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ['''OMP_NUM_THREADS'''] = '''1'''
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None # type: ignore
    __builtins__['''help'''] = None
    import sys
    sys.modules['''ipdb'''] = None
    sys.modules['''joblib'''] = None
    sys.modules['''resource'''] = None
    sys.modules['''psutil'''] = None
    sys.modules['''tkinter'''] = None | 619 |
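A usage sketch for the repaired harness (Unix-like systems only, since it relies on signals and subprocesses; the task id and program are made up for illustration):

program = "assert 1 + 1 == 2"
verdict = check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0)
print(verdict)  # {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}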
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class AutomaticSpeechRecognition( TaskTemplate ):
    '''simple docstring'''
    task: str = field(default="""automatic-speech-recognition""", metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema: ClassVar[Features] = Features({"""audio""": Audio()} )
    label_schema: ClassVar[Features] = Features({"""transcription""": Value("""string""" )} )
    audio_column: str = "audio"
    transcription_column: str = "transcription"
    def align_with_features( self , features ) -> "AutomaticSpeechRecognition":
        if self.audio_column not in features:
            raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template
    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"} | 619 | 1 |
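A hedged usage sketch (run within this module so the class is in scope; the feature classes come from the datasets library):

from datasets import Audio, Features, Value

features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
template = AutomaticSpeechRecognition()
aligned = template.align_with_features(features)   # copies the concrete Audio feature in
print(aligned.column_mapping)  # {'audio': 'audio', 'transcription': 'transcription'}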
import re
def indian_phone_validator( phone: str )-> bool:
    pat = re.compile(r'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("""+918827897895""")) | 619 |
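A few illustrative checks: the regex accepts an optional +91/91/0 prefix followed by a ten-digit number starting with 7, 8 or 9.

print(indian_phone_validator("9876543210"))      # True
print(indian_phone_validator("+91 8827897895"))  # True (space after +91 allowed)
print(indian_phone_validator("+9112345678"))     # False: starts with 1 and too short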
_UpperCAmelCase : int =frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
_UpperCAmelCase : List[Any] =frozenset(["""prompt""", """negative_prompt"""])
_UpperCAmelCase : Dict =frozenset([])
_UpperCAmelCase : int =frozenset(["""image"""])
_UpperCAmelCase : Tuple =frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
_UpperCAmelCase : int =frozenset(["""image"""])
_UpperCAmelCase : str =frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
_UpperCAmelCase : int =frozenset(["""prompt""", """image""", """negative_prompt"""])
_UpperCAmelCase : Optional[int] =frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
_UpperCAmelCase : Optional[int] =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_UpperCAmelCase : Optional[Any] =frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
_UpperCAmelCase : Optional[Any] =frozenset(["""image""", """mask_image"""])
_UpperCAmelCase : Union[str, Any] =frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
_UpperCAmelCase : Tuple =frozenset(["""example_image""", """image""", """mask_image"""])
_UpperCAmelCase : Any =frozenset(["""class_labels"""])
_UpperCAmelCase : List[Any] =frozenset(["""class_labels"""])
_UpperCAmelCase : int =frozenset(["""batch_size"""])
_UpperCAmelCase : str =frozenset([])
_UpperCAmelCase : str =frozenset(["""batch_size"""])
_UpperCAmelCase : Optional[Any] =frozenset([])
_UpperCAmelCase : Tuple =frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
_UpperCAmelCase : Tuple =frozenset(["""prompt""", """negative_prompt"""])
_UpperCAmelCase : List[str] =frozenset(["""input_tokens"""])
_UpperCAmelCase : Optional[Any] =frozenset(["""input_tokens"""]) | 619 | 1 |
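These frozensets mirror the parameter sets that diffusers' pipeline tests compare call signatures against; the module-level constant names were lost above, so TEXT_TO_IMAGE_PARAMS below is an assumed name for the first set. A sketch of the typical check:

TEXT_TO_IMAGE_PARAMS = frozenset(
    ["prompt", "height", "width", "guidance_scale", "negative_prompt",
     "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs"])

def missing_required(accepted_args: set, required: frozenset) -> set:
    # parameters a pipeline's __call__ would fail to accept
    return set(required) - accepted_args

print(missing_required({"prompt", "height", "width"}, TEXT_TO_IMAGE_PARAMS))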
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class TFPTAutoModelTest( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase_ ( self ) -> Tuple:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase_ : Optional[int] = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Dict = TFAutoModel.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Tuple = AutoModel.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def lowercase_ ( self ) -> List[str]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase_ : Dict = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : List[str] = TFAutoModelForPreTraining.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Tuple = AutoModelForPreTraining.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def lowercase_ ( self ) -> Dict:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : List[str] = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : List[Any] = TFAutoModelForCausalLM.from_pretrained(__lowercase , from_pt=__lowercase )
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = TFAutoModelForCausalLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : str = AutoModelForCausalLM.from_pretrained(__lowercase , from_tf=__lowercase )
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def lowercase_ ( self ) -> Optional[int]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : List[Any] = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : str = TFAutoModelWithLMHead.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Dict = AutoModelWithLMHead.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def lowercase_ ( self ) -> Any:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : str = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : int = TFAutoModelForMaskedLM.from_pretrained(__lowercase , from_pt=__lowercase )
lowerCAmelCase_ , lowerCAmelCase_ : Dict = TFAutoModelForMaskedLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Tuple = AutoModelForMaskedLM.from_pretrained(__lowercase , from_tf=__lowercase )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = AutoModelForMaskedLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def lowercase_ ( self ) -> List[str]:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(__lowercase , from_pt=__lowercase )
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__lowercase , from_tf=__lowercase )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def lowercase_ ( self ) -> int:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase_ : Tuple = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def lowercase_ ( self ) -> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase_ : int = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : int = TFAutoModelForQuestionAnswering.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Any = AutoModelForQuestionAnswering.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
    def test_from_pretrained_identifier( self ) -> Tuple:
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 1_4_4_1_0 )
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , BertForMaskedLM )
        self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 1_4_4_1_0 )
    def test_from_identifier_from_model_type( self ) -> List[str]:
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 1_4_4_1_0 )
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , RobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 1_4_4_1_0 ) | 619 |
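A condensed sketch of the PT<->TF round trip the last two tests exercise (run inside this module so SMALL_MODEL_IDENTIFIER is in scope; both loads go through the cross-framework conversion path):

tf_model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
pt_model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
print(tf_model.num_parameters(), pt_model.num_parameters())  # 14410 14410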
def solution( n = 1_000_000 )-> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2 , n ):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip()))) | 619 | 1 |
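Cross-check sketch: the memoised counter above should agree with a direct, unmemoised Collatz chain length on small inputs.

def collatz_length(n: int) -> int:
    length = 1
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        length += 1
    return length

print(collatz_length(27))  # 112, the classic long chain among small starts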
from __future__ import annotations
import math
def is_prime( number )-> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums( n )-> list[int]:
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
def validate( n )-> bool:
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True
def compute_truncated_primes( count = 11 )-> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def solution( )-> int:
    return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(f"""{sum(compute_truncated_primes(11)) = }""") | 619 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : str =logging.get_logger(__name__)
class EncoderDecoderConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """encoder-decoder"""
    is_composition = True
    def __init__( self , **kwargs ) -> None:
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config , decoder_config , **kwargs ) -> PretrainedConfig:
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output | 619 | 1 |
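A hedged usage sketch, assuming the class above is transformers' EncoderDecoderConfig:

from transformers import BertConfig, EncoderDecoderConfig

encoder = BertConfig(num_hidden_layers=2, hidden_size=128)
decoder = BertConfig(num_hidden_layers=2, hidden_size=128)
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True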
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=7 , __lowercase=3 , __lowercase=1_8 , __lowercase=3_0 , __lowercase=4_0_0 , __lowercase=True , __lowercase=None , __lowercase=True , __lowercase=False , __lowercase=True , __lowercase=True , __lowercase=[0.5, 0.5, 0.5] , __lowercase=[0.5, 0.5, 0.5] , ) -> List[str]:
lowerCAmelCase_ : Any = parent
lowerCAmelCase_ : List[str] = batch_size
lowerCAmelCase_ : Tuple = num_channels
lowerCAmelCase_ : int = image_size
lowerCAmelCase_ : Optional[Any] = min_resolution
lowerCAmelCase_ : List[str] = max_resolution
lowerCAmelCase_ : Any = do_resize
lowerCAmelCase_ : str = size if size is not None else {'''height''': 1_8, '''width''': 2_0}
lowerCAmelCase_ : List[str] = do_thumbnail
lowerCAmelCase_ : str = do_align_axis
lowerCAmelCase_ : Any = do_pad
lowerCAmelCase_ : Any = do_normalize
lowerCAmelCase_ : Optional[Any] = image_mean
lowerCAmelCase_ : str = image_std
def lowercase_ ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = DonutImageProcessor if is_vision_available() else None
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : Union[str, Any] = DonutImageProcessingTester(self )
@property
def lowercase_ ( self ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowercase , '''size''' ) )
self.assertTrue(hasattr(__lowercase , '''do_thumbnail''' ) )
self.assertTrue(hasattr(__lowercase , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(__lowercase , '''do_pad''' ) )
self.assertTrue(hasattr(__lowercase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowercase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowercase , '''image_std''' ) )
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 2_0} )
lowerCAmelCase_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} )
# Previous config had dimensions in (width, height) order
lowerCAmelCase_ : int = self.image_processing_class.from_dict(self.image_processor_dict , size=(4_2, 8_4) )
self.assertEqual(image_processor.size , {'''height''': 8_4, '''width''': 4_2} )
def lowercase_ ( self ) -> Union[str, Any]:
pass
@is_flaky()
def lowercase_ ( self ) -> Optional[Any]:
# Initialize image_processing
lowerCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , Image.Image )
# Test not batched input
lowerCAmelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : Optional[int] = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def lowercase_ ( self ) -> List[Any]:
# Initialize image_processing
lowerCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , np.ndarray )
# Test not batched input
lowerCAmelCase_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : List[str] = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def lowercase_ ( self ) -> Any:
# Initialize image_processing
lowerCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , torch.Tensor )
# Test not batched input
lowerCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : Tuple = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , ) | 619 |
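# The assertions above rely on `size` being normalised into a
# {"height", "width"} dict, with a 2-tuple read in legacy (width, height)
# order. A minimal sketch of that conversion, assuming this simplified
# behaviour (illustrative helper, not transformers' exact implementation):
def normalize_size(size):
    if isinstance(size, int):
        return {"height": size, "width": size}
    if isinstance(size, (list, tuple)) and len(size) == 2:
        width, height = size  # legacy configs stored (width, height)
        return {"height": height, "width": width}
    return dict(size)

assert normalize_size(42) == {"height": 42, "width": 42}
assert normalize_size((42, 84)) == {"height": 84, "width": 42}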
from __future__ import annotations
from math import pi
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 | 1 |
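# Worked example of the relation X_L = 2*pi*f*L that the function above
# solves for whichever argument is 0 (names here are illustrative; the
# snippet itself uses obfuscated identifiers):
from math import isclose, pi

def inductive_reactance(inductance: float, frequency: float) -> float:
    return 2 * pi * frequency * inductance

x_l = inductive_reactance(0.035, 1_000)        # ~219.9 ohms
assert isclose(x_l / (2 * pi * 1_000), 0.035)  # solving back for L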
def lowerCAmelCase ( lowerCAmelCase_ = 2_000_000 )-> int:
lowerCAmelCase_ : List[Any] = [0 for i in range(n + 1 )]
lowerCAmelCase_ : List[str] = 1
lowerCAmelCase_ : Optional[int] = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , lowerCAmelCase_ ):
lowerCAmelCase_ : str = 1
lowerCAmelCase_ : Tuple = 0
for i in range(lowerCAmelCase_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""") | 619 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = """linear"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """cosine"""
SCREAMING_SNAKE_CASE__ : Dict = """cosine_with_restarts"""
SCREAMING_SNAKE_CASE__ : List[str] = """polynomial"""
SCREAMING_SNAKE_CASE__ : Dict = """constant"""
SCREAMING_SNAKE_CASE__ : List[str] = """constant_with_warmup"""
SCREAMING_SNAKE_CASE__ : str = """piecewise_constant"""
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ = -1 )-> Tuple:
return LambdaLR(lowerCAmelCase_ , lambda lowerCAmelCase_ : 1 , last_epoch=lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = -1 )-> List[Any]:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1.0 , lowerCAmelCase_ ) )
return 1.0
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = -1 )-> int:
lowerCAmelCase_ : Optional[int] = {}
lowerCAmelCase_ : Union[str, Any] = step_rules.split(''',''' )
for rule_str in rule_list[:-1]:
lowerCAmelCase_ , lowerCAmelCase_ : str = rule_str.split(''':''' )
lowerCAmelCase_ : int = int(lowerCAmelCase_ )
lowerCAmelCase_ : str = float(lowerCAmelCase_ )
lowerCAmelCase_ : List[Any] = value
lowerCAmelCase_ : int = float(rule_list[-1] )
def create_rules_function(lowerCAmelCase_ , lowerCAmelCase_ ):
def rule_func(lowerCAmelCase_ ) -> float:
lowerCAmelCase_ : Tuple = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowerCAmelCase_ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
lowerCAmelCase_ : Tuple = create_rules_function(lowerCAmelCase_ , lowerCAmelCase_ )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=-1 )-> Optional[int]:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0.5 , lowerCAmelCase_ = -1 )-> List[Any]:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
lowerCAmelCase_ : Tuple = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCAmelCase_ ) * 2.0 * progress )) )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = -1 )-> Dict:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
lowerCAmelCase_ : List[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCAmelCase_ ) * progress) % 1.0) )) )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1e-7 , lowerCAmelCase_=1.0 , lowerCAmelCase_=-1 )-> Any:
lowerCAmelCase_ : Dict = optimizer.defaults['''lr''']
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowerCAmelCase_ : List[Any] = lr_init - lr_end
lowerCAmelCase_ : Optional[Any] = num_training_steps - num_warmup_steps
lowerCAmelCase_ : Any = 1 - (current_step - num_warmup_steps) / decay_steps
lowerCAmelCase_ : List[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] ={
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1.0 , lowerCAmelCase_ = -1 , )-> Optional[int]:
lowerCAmelCase_ : Union[str, Any] = SchedulerType(lowerCAmelCase_ )
lowerCAmelCase_ : Optional[int] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCAmelCase_ , step_rules=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , num_cycles=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , power=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ , )
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ ) | 619 | 1 |
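# The linear schedule above yields a multiplier on the base learning rate:
# it ramps 0 -> 1 over `num_warmup_steps`, then decays linearly to 0 at
# `num_training_steps`. A quick numeric check of that shape:
def linear_multiplier(step, num_warmup_steps=100, num_training_steps=1_000):
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))

assert linear_multiplier(50) == 0.5     # halfway through warmup
assert linear_multiplier(100) == 1.0    # warmup just finished
assert linear_multiplier(550) == 0.5    # halfway through the decay
assert linear_multiplier(1_000) == 0.0  # fully decayed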
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class snake_case__( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Tuple:
lowerCAmelCase_ : int = hf_hub_download(
repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
lowerCAmelCase_ : str = VideoClassificationPipeline(model=__lowercase , image_processor=__lowercase , top_k=2 )
lowerCAmelCase_ : Dict = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def lowercase_ ( self , __lowercase , __lowercase ) -> Tuple:
for example in examples:
lowerCAmelCase_ : List[Any] = video_classifier(__lowercase )
self.assertEqual(
__lowercase , [
{'''score''': ANY(__lowercase ), '''label''': ANY(__lowercase )},
{'''score''': ANY(__lowercase ), '''label''': ANY(__lowercase )},
] , )
@require_torch
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ : Dict = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
lowerCAmelCase_ : Dict = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 1_0} , crop_size={'''height''': 1_0, '''width''': 1_0} )
lowerCAmelCase_ : Dict = pipeline(
'''video-classification''' , model=__lowercase , feature_extractor=__lowercase , frame_sampling_rate=4 )
lowerCAmelCase_ : Any = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
lowerCAmelCase_ : Dict = video_classifier(__lowercase , top_k=2 )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [{'''score''': 0.51_99, '''label''': '''LABEL_0'''}, {'''score''': 0.48_01, '''label''': '''LABEL_1'''}] , )
lowerCAmelCase_ : Optional[Any] = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[{'''score''': 0.51_99, '''label''': '''LABEL_0'''}, {'''score''': 0.48_01, '''label''': '''LABEL_1'''}],
[{'''score''': 0.51_99, '''label''': '''LABEL_0'''}, {'''score''': 0.48_01, '''label''': '''LABEL_1'''}],
] , )
@require_tf
def lowercase_ ( self ) -> str:
pass | 619 |
from __future__ import annotations
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )-> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
        raise ValueError('''Exactly one of the three values must be 0 (supply the other two)''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 | 1 |
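# The three branches above are the mass-action law n * p = n_i**2 solved for
# whichever carrier concentration is the unknown. Quick sanity check with
# illustrative silicon-like numbers (cm^-3):
n_i = 1.5e10         # intrinsic carrier concentration
electrons = 1.0e16   # majority carriers in an n-type sample
holes = n_i**2 / electrons
assert abs(electrons * holes - n_i**2) <= 1e-6 * n_i**2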
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""", """False""" ) ) is not True, reason="""Skipping test because it should only be run when releasing a minor transformers version""", )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> List[Any]:
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=__lowercase , )
assert hasattr(self , '''env''' )
def lowercase_ ( self , __lowercase=1 ) -> Dict:
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=__lowercase , instance_type=self.instance_type , debugger_hook_config=__lowercase , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )
def lowercase_ ( self , __lowercase ) -> List[str]:
TrainingJobAnalytics(__lowercase ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
def lowercase_ ( self ) -> int:
# create estimator
lowerCAmelCase_ : Optional[Any] = self.create_estimator()
# run training
estimator.fit()
# result dataframe
lowerCAmelCase_ : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase_ : List[str] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
lowerCAmelCase_ : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase_ : str = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , __lowercase ) | 619 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_UpperCAmelCase : Any ="""src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_UpperCAmelCase : Optional[Any] =direct_transformers_import(PATH_TO_TRANSFORMERS)
_UpperCAmelCase : List[str] =transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_UpperCAmelCase : Dict =re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
_UpperCAmelCase : Any ={
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
lowerCAmelCase_ : Any = None
# source code of `config_class`
lowerCAmelCase_ : Optional[int] = inspect.getsource(lowerCAmelCase_ )
lowerCAmelCase_ : str = _re_checkpoint.findall(lowerCAmelCase_ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
lowerCAmelCase_ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowerCAmelCase_ : Tuple = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
lowerCAmelCase_ : List[str] = ckpt_name
break
return checkpoint
def lowerCAmelCase ( )-> Optional[Any]:
lowerCAmelCase_ : Tuple = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowerCAmelCase_ : int = get_checkpoint_from_config_class(lowerCAmelCase_ )
lowerCAmelCase_ : Tuple = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
lowerCAmelCase_ : List[Any] = '''\n'''.join(sorted(lowerCAmelCase_ ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints() | 619 | 1 |
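# Illustration of what the checkpoint regex above extracts from one
# docstring line (pattern copied from `_re_checkpoint`):
import re
_ckpt_re = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
_line = "the [bert-base-uncased](https://huggingface.co/bert-base-uncased) architecture"
assert _ckpt_re.findall(_line) == [
    ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
]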
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_UpperCAmelCase : Tuple =10
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int:
for i in range(lowerCAmelCase_ , lowerCAmelCase_ ):
if array[i] == target:
return i
return -1
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> int:
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : int = len(lowerCAmelCase_ )
while left <= right:
if right - left < precision:
return lin_search(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : List[Any] = (left + right) // 3 + 1
lowerCAmelCase_ : List[str] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
lowerCAmelCase_ : Dict = one_third - 1
elif array[two_third] < target:
lowerCAmelCase_ : List[Any] = two_third + 1
else:
lowerCAmelCase_ : Union[str, Any] = one_third + 1
lowerCAmelCase_ : Tuple = two_third - 1
else:
return -1
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int:
if left < right:
if right - left < precision:
return lin_search(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : Union[str, Any] = (left + right) // 3 + 1
lowerCAmelCase_ : Optional[int] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(lowerCAmelCase_ , one_third - 1 , lowerCAmelCase_ , lowerCAmelCase_ )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , lowerCAmelCase_ , lowerCAmelCase_ )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase : Tuple =input("""Enter numbers separated by comma:\n""").strip()
_UpperCAmelCase : Union[str, Any] =[int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
_UpperCAmelCase : int =int(input("""Enter the number to be found in the list:\n""").strip())
_UpperCAmelCase : Optional[Any] =ite_ternary_search(collection, target)
_UpperCAmelCase : List[str] =rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f"""Iterative search: {target} found at positions: {resulta}""")
print(f"""Recursive search: {target} found at positions: {resulta}""")
else:
print("""Not found""") | 619 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase = True , __lowercase = False ) -> Tuple:
lowerCAmelCase_ : Optional[int] = scheduler
lowerCAmelCase_ : Dict = optimizers if isinstance(__lowercase , (list, tuple) ) else [optimizers]
lowerCAmelCase_ : str = split_batches
lowerCAmelCase_ : Any = step_with_optimizer
lowerCAmelCase_ : Optional[Any] = GradientState()
def lowercase_ ( self , *__lowercase , **__lowercase ) -> Any:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*__lowercase , **__lowercase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*__lowercase , **__lowercase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
lowerCAmelCase_ : Optional[Any] = AcceleratorState().num_processes
for _ in range(__lowercase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*__lowercase , **__lowercase )
else:
self.scheduler.step(*__lowercase , **__lowercase )
def lowercase_ ( self ) -> Union[str, Any]:
return self.scheduler.get_last_lr()
def lowercase_ ( self ) -> List[str]:
return self.scheduler.state_dict()
def lowercase_ ( self , __lowercase ) -> int:
self.scheduler.load_state_dict(__lowercase )
def lowercase_ ( self ) -> Tuple:
return self.scheduler.get_lr()
def lowercase_ ( self , *__lowercase , **__lowercase ) -> int:
return self.scheduler.print_lr(*__lowercase , **__lowercase ) | 619 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
_UpperCAmelCase : Any =["""bert-base-uncased""", """bert-base-cased"""]
_UpperCAmelCase : Dict ="""hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class snake_case__( tf.keras.Model ):
'''simple docstring'''
def __init__( self , __lowercase ) -> Any:
super().__init__()
lowerCAmelCase_ : Any = tokenizer
lowerCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained(__lowercase )
lowerCAmelCase_ : Any = TFAutoModel.from_config(__lowercase )
def lowercase_ ( self , __lowercase ) -> Tuple:
lowerCAmelCase_ : Tuple = self.tokenizer(__lowercase )
lowerCAmelCase_ : Optional[Any] = self.bert(**__lowercase )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> int:
super().setUp()
lowerCAmelCase_ : Any = [
BertTokenizer.from_pretrained(__lowercase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
lowerCAmelCase_ : Any = [TFBertTokenizer.from_pretrained(__lowercase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(__lowercase , use_fast_bert_tokenizer=__lowercase )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
lowerCAmelCase_ : int = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
lowerCAmelCase_ : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def lowercase_ ( self ) -> int:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
lowerCAmelCase_ : Union[str, Any] = tokenizer(__lowercase , return_tensors='''tf''' , padding='''longest''' )
lowerCAmelCase_ : List[Any] = tf_tokenizer(__lowercase )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def lowercase_ ( self ) -> Union[str, Any]:
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase_ : Tuple = tf_tokenizer(self.paired_sentences )
lowerCAmelCase_ : Optional[Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def lowercase_ ( self ) -> Union[str, Any]:
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase_ : int = tf.function(__lowercase )
for test_inputs in (self.test_sentences, self.paired_sentences):
lowerCAmelCase_ : Optional[int] = tf.constant(__lowercase )
lowerCAmelCase_ : Any = compiled_tokenizer(__lowercase )
lowerCAmelCase_ : Tuple = tf_tokenizer(__lowercase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def lowercase_ ( self ) -> Optional[int]:
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase_ : Any = ModelToSave(tokenizer=__lowercase )
lowerCAmelCase_ : Dict = tf.convert_to_tensor(self.test_sentences )
lowerCAmelCase_ : Dict = model(__lowercase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowerCAmelCase_ : str = Path(__lowercase ) / '''saved.model'''
model.save(__lowercase )
lowerCAmelCase_ : Tuple = tf.keras.models.load_model(__lowercase )
lowerCAmelCase_ : Dict = loaded_model(__lowercase )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 ) | 619 |
from manim import *
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 )
lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowercase )
lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 )
lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
gpu.move_to([-1, -1, 0] )
self.add(__lowercase )
lowerCAmelCase_ : str = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 )
lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
model.move_to([3, -1.0, 0] )
self.add(__lowercase )
lowerCAmelCase_ : int = []
lowerCAmelCase_ : int = []
lowerCAmelCase_ : Dict = []
for i, rect in enumerate(__lowercase ):
rect.set_stroke(__lowercase )
lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
self.add(__lowercase )
model_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase , *__lowercase )
lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 )
lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__lowercase )
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Dict = []
for i, rect in enumerate(__lowercase ):
lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 )
target.move_to(__lowercase )
ckpt_arr.append(__lowercase )
lowerCAmelCase_ : Union[str, Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase )
lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase_ : str = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowercase , __lowercase )
lowerCAmelCase_ : str = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowercase )
lowerCAmelCase_ : str = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 )
lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
lowerCAmelCase_ : int = []
for i, rect in enumerate(__lowercase ):
lowerCAmelCase_ : int = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
self.play(*__lowercase )
self.play(FadeOut(__lowercase ) )
lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase , run_time=3 ) )
self.play(
FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
self.wait() | 619 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=1_3 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=9_9 , __lowercase=3_2 , __lowercase=2 , __lowercase=4 , __lowercase=3_7 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_1_2 , __lowercase=1_6 , __lowercase=2 , __lowercase=0.02 , __lowercase=3 , __lowercase=4 , __lowercase=None , __lowercase=1_0_0_0 , ) -> str:
lowerCAmelCase_ : Dict = parent
lowerCAmelCase_ : Union[str, Any] = batch_size
lowerCAmelCase_ : int = seq_length
lowerCAmelCase_ : Dict = is_training
lowerCAmelCase_ : List[str] = use_input_mask
lowerCAmelCase_ : Union[str, Any] = use_token_type_ids
lowerCAmelCase_ : List[Any] = use_labels
lowerCAmelCase_ : Dict = vocab_size
lowerCAmelCase_ : Optional[Any] = hidden_size
lowerCAmelCase_ : str = num_hidden_layers
lowerCAmelCase_ : Dict = num_attention_heads
lowerCAmelCase_ : str = intermediate_size
lowerCAmelCase_ : Union[str, Any] = hidden_act
lowerCAmelCase_ : str = hidden_dropout_prob
lowerCAmelCase_ : List[Any] = attention_probs_dropout_prob
lowerCAmelCase_ : Any = max_position_embeddings
lowerCAmelCase_ : List[str] = type_vocab_size
lowerCAmelCase_ : List[str] = type_sequence_label_size
lowerCAmelCase_ : int = initializer_range
lowerCAmelCase_ : Dict = num_labels
lowerCAmelCase_ : str = num_choices
lowerCAmelCase_ : int = scope
lowerCAmelCase_ : int = range_bbox
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
lowerCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCAmelCase_ : Optional[int] = bbox[i, j, 3]
lowerCAmelCase_ : str = bbox[i, j, 1]
lowerCAmelCase_ : Any = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCAmelCase_ : List[Any] = bbox[i, j, 2]
lowerCAmelCase_ : Any = bbox[i, j, 0]
lowerCAmelCase_ : Dict = t
lowerCAmelCase_ : Any = tf.convert_to_tensor(__lowercase )
lowerCAmelCase_ : Union[str, Any] = None
if self.use_input_mask:
lowerCAmelCase_ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ : Any = None
if self.use_token_type_ids:
lowerCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ : str = None
lowerCAmelCase_ : Dict = None
lowerCAmelCase_ : Union[str, Any] = None
if self.use_labels:
lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ : Optional[int] = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[int]:
lowerCAmelCase_ : str = TFLayoutLMModel(config=__lowercase )
lowerCAmelCase_ : str = model(__lowercase , __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
lowerCAmelCase_ : List[Any] = model(__lowercase , __lowercase , token_type_ids=__lowercase )
lowerCAmelCase_ : Any = model(__lowercase , __lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> str:
lowerCAmelCase_ : Dict = TFLayoutLMForMaskedLM(config=__lowercase )
lowerCAmelCase_ : int = model(__lowercase , __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]:
lowerCAmelCase_ : int = self.num_labels
lowerCAmelCase_ : Dict = TFLayoutLMForSequenceClassification(config=__lowercase )
lowerCAmelCase_ : Union[str, Any] = model(__lowercase , __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]:
lowerCAmelCase_ : List[str] = self.num_labels
lowerCAmelCase_ : Union[str, Any] = TFLayoutLMForTokenClassification(config=__lowercase )
lowerCAmelCase_ : int = model(__lowercase , __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> str:
lowerCAmelCase_ : Tuple = TFLayoutLMForQuestionAnswering(config=__lowercase )
lowerCAmelCase_ : Dict = model(__lowercase , __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) ,
) : Any = config_and_inputs
lowerCAmelCase_ : List[Any] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE__ : List[str] = (
{
"""feature-extraction""": TFLayoutLMModel,
"""fill-mask""": TFLayoutLMForMaskedLM,
"""text-classification""": TFLayoutLMForSequenceClassification,
"""token-classification""": TFLayoutLMForTokenClassification,
"""zero-shot""": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 10
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : Union[str, Any] = TFLayoutLMModelTester(self )
lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , hidden_size=3_7 )
def lowercase_ ( self ) -> int:
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowercase )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowercase )
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowercase )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowercase )
@slow
def lowercase_ ( self ) -> Dict:
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : str = TFLayoutLMModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
@unittest.skip('''Onnx compliancy broke with TF 2.10''' )
def lowercase_ ( self ) -> Union[str, Any]:
pass
def lowerCAmelCase ( )-> List[Any]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
lowerCAmelCase_ : Union[str, Any] = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231
lowerCAmelCase_ : Dict = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
lowerCAmelCase_ : Union[str, Any] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231
lowerCAmelCase_ : List[Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
lowerCAmelCase_ : List[str] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class snake_case__( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : int = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Dict = prepare_layoutlm_batch_inputs()
# forward pass
lowerCAmelCase_ : str = model(input_ids=__lowercase , bbox=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
# test the sequence output on [0, :3, :3]
lowerCAmelCase_ : List[str] = tf.convert_to_tensor(
[[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __lowercase , atol=1e-3 ) )
# test the pooled output on [1, :3]
lowerCAmelCase_ : Any = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , __lowercase , atol=1e-3 ) )
@slow
def lowercase_ ( self ) -> Union[str, Any]:
# initialize model with randomly initialized sequence classification head
lowerCAmelCase_ : str = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2 )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = prepare_layoutlm_batch_inputs()
# forward pass
lowerCAmelCase_ : Optional[int] = model(
input_ids=__lowercase , bbox=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
lowerCAmelCase_ : List[str] = outputs.loss
lowerCAmelCase_ : Optional[int] = (2,)
self.assertEqual(loss.shape , __lowercase )
# test the shape of the logits
lowerCAmelCase_ : List[str] = outputs.logits
lowerCAmelCase_ : Union[str, Any] = (2, 2)
self.assertEqual(logits.shape , __lowercase )
@slow
def lowercase_ ( self ) -> List[Any]:
# initialize model with randomly initialized token classification head
lowerCAmelCase_ : int = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=1_3 )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = prepare_layoutlm_batch_inputs()
# forward pass
lowerCAmelCase_ : Optional[Any] = model(
input_ids=__lowercase , bbox=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
# test the shape of the logits
lowerCAmelCase_ : Tuple = outputs.logits
lowerCAmelCase_ : List[Any] = tf.convert_to_tensor((2, 2_5, 1_3) )
self.assertEqual(logits.shape , __lowercase )
@slow
def lowercase_ ( self ) -> Tuple:
        # initialize model with randomly initialized question answering head
lowerCAmelCase_ : Dict = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = prepare_layoutlm_batch_inputs()
# forward pass
lowerCAmelCase_ : Any = model(input_ids=__lowercase , bbox=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
# test the shape of the logits
lowerCAmelCase_ : Optional[int] = tf.convert_to_tensor((2, 2_5) )
self.assertEqual(outputs.start_logits.shape , __lowercase )
self.assertEqual(outputs.end_logits.shape , __lowercase ) | 619 |
_UpperCAmelCase : Dict =[
(1000, """M"""),
(900, """CM"""),
(500, """D"""),
(400, """CD"""),
(100, """C"""),
(90, """XC"""),
(50, """L"""),
(40, """XL"""),
(10, """X"""),
(9, """IX"""),
(5, """V"""),
(4, """IV"""),
(1, """I"""),
]
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
lowerCAmelCase_ : Any = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000}
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : List[str] = 0
while place < len(lowerCAmelCase_ ):
if (place + 1 < len(lowerCAmelCase_ )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
lowerCAmelCase_ : List[Any] = []
for arabic, roman in ROMAN:
((lowerCAmelCase_) , (lowerCAmelCase_)) : Optional[int] = divmod(lowerCAmelCase_ , lowerCAmelCase_ )
result.append(roman * factor )
if number == 0:
break
return "".join(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 | 1 |
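# Worked example of the greedy conversion above: 3_549 decomposes as
# 3 * 1000 ("MMM") + 1 * 500 ("D") + 1 * 40 ("XL") + 1 * 9 ("IX"):
_roman_table = [(1000, "M"), (900, "CM"), (500, "D"), (400, "CD"), (100, "C"),
                (90, "XC"), (50, "L"), (40, "XL"), (10, "X"), (9, "IX"),
                (5, "V"), (4, "IV"), (1, "I")]
_number, _parts = 3_549, []
for _arabic, _glyph in _roman_table:
    _factor, _number = divmod(_number, _arabic)
    _parts.append(_glyph * _factor)
assert "".join(_parts) == "MMMDXLIX"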
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_UpperCAmelCase : Any ={"""configuration_glpn""": ["""GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GLPNConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[Any] =["""GLPNFeatureExtractor"""]
_UpperCAmelCase : Tuple =["""GLPNImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : str =[
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : List[str] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 619 |
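# The module above defers heavy imports until first attribute access. A
# minimal generic sketch of the same idea (illustrative, not transformers'
# actual `_LazyModule`):
import importlib

class LazyNamespace:
    def __init__(self, mapping):
        self._mapping = mapping  # attribute name -> module path
    def __getattr__(self, name):
        module = importlib.import_module(self._mapping[name])
        return getattr(module, name)

_lazy = LazyNamespace({"OrderedDict": "collections"})
assert _lazy.OrderedDict is importlib.import_module("collections").OrderedDict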
import csv
import tweepy
# Twitter API credentials
_UpperCAmelCase : int =""""""
_UpperCAmelCase : Optional[int] =""""""
_UpperCAmelCase : Dict =""""""
_UpperCAmelCase : str =""""""
def lowerCAmelCase ( lowerCAmelCase_ )-> None:
# authorize twitter, initialize tweepy
lowerCAmelCase_ : Optional[int] = tweepy.OAuthHandler(lowerCAmelCase_ , lowerCAmelCase_ )
auth.set_access_token(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : Any = tweepy.API(lowerCAmelCase_ )
# initialize a list to hold all the tweepy Tweets
lowerCAmelCase_ : Dict = []
# make initial request for most recent tweets (200 is the maximum allowed count)
lowerCAmelCase_ : Optional[int] = api.user_timeline(screen_name=lowerCAmelCase_ , count=200 )
# save most recent tweets
alltweets.extend(lowerCAmelCase_ )
# save the id of the oldest tweet less one
lowerCAmelCase_ : str = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase_ ) > 0:
print(f"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
lowerCAmelCase_ : Optional[Any] = api.user_timeline(
screen_name=lowerCAmelCase_ , count=200 , max_id=lowerCAmelCase_ )
# save most recent tweets
alltweets.extend(lowerCAmelCase_ )
# update the id of the oldest tweet less one
lowerCAmelCase_ : Optional[Any] = alltweets[-1].id - 1
print(f"""...{len(lowerCAmelCase_ )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
lowerCAmelCase_ : Union[str, Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f"""new_{screen_name}_tweets.csv""" , '''w''' ) as f:
lowerCAmelCase_ : Optional[int] = csv.writer(lowerCAmelCase_ )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(lowerCAmelCase_ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""") | 619 | 1 |
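# The download loop above is keyset pagination: grab one page, then request
# only items strictly older than the smallest id seen. Generic sketch of the
# pattern (`fetch_page` is a stand-in, not the tweepy API):
def paginate(fetch_page, page_size=200):
    items = []
    page = fetch_page(count=page_size)
    while page:
        items.extend(page)
        oldest = page[-1]["id"] - 1  # ids arrive newest-first
        page = fetch_page(count=page_size, max_id=oldest)
    return items

def _fake_fetch(count, max_id=10**9):  # toy backend serving ids 10..1
    ids = [i for i in range(10, 0, -1) if i <= max_id]
    return [{"id": i} for i in ids[:count]]

assert [t["id"] for t in paginate(_fake_fetch, page_size=4)] == list(range(10, 0, -1))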
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
lowerCAmelCase_ : str = k_size // 2
lowerCAmelCase_ , lowerCAmelCase_ : str = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
lowerCAmelCase_ : Optional[int] = 1 / (2 * pi * sigma) * exp(-(square(lowerCAmelCase_ ) + square(lowerCAmelCase_ )) / (2 * square(lowerCAmelCase_ )) )
return g
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[Any]:
lowerCAmelCase_ , lowerCAmelCase_ : Dict = image.shape[0], image.shape[1]
# dst image height and width
lowerCAmelCase_ : Union[str, Any] = height - k_size + 1
lowerCAmelCase_ : str = width - k_size + 1
    # im2col: flatten each k_size*k_size window into one row of the matrix below
lowerCAmelCase_ : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
lowerCAmelCase_ : Optional[int] = 0
for i, j in product(range(lowerCAmelCase_ ) , range(lowerCAmelCase_ ) ):
lowerCAmelCase_ : Dict = ravel(image[i : i + k_size, j : j + k_size] )
lowerCAmelCase_ : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
lowerCAmelCase_ : Tuple = gen_gaussian_kernel(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : Optional[Any] = ravel(lowerCAmelCase_ )
# reshape and get the dst image
lowerCAmelCase_ : Dict = dot(lowerCAmelCase_ , lowerCAmelCase_ ).reshape(lowerCAmelCase_ , lowerCAmelCase_ ).astype(lowerCAmelCase_ )
return dst
if __name__ == "__main__":
# read original image
_UpperCAmelCase : List[Any] =imread(R"""../image_data/lena.jpg""")
# turn image in gray scale value
_UpperCAmelCase : Union[str, Any] =cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_UpperCAmelCase : Optional[Any] =gaussian_filter(gray, 3, sigma=1)
_UpperCAmelCase : List[Any] =gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("""gaussian filter with 3x3 mask""", gaussianaxa)
imshow("""gaussian filter with 5x5 mask""", gaussianaxa)
waitKey() | 619 |
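# Note on the kernel above: a normalised 2-D Gaussian uses
# 1 / (2 * pi * sigma**2), while the snippet divides by (2 * pi * sigma) and
# never renormalises, which merely rescales intensity. A variant that
# normalises explicitly so filtering preserves overall brightness:
import numpy as np

def gaussian_kernel(k_size: int, sigma: float) -> np.ndarray:
    center = k_size // 2
    x, y = np.mgrid[-center : k_size - center, -center : k_size - center]
    g = np.exp(-(x**2 + y**2) / (2 * sigma**2))
    return g / g.sum()  # kernel sums to 1

assert abs(gaussian_kernel(5, 0.8).sum() - 1.0) < 1e-9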
from math import sqrt
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
lowerCAmelCase_ : str = True
# 0 and 1 are none primes.
if number <= 1:
lowerCAmelCase_ : List[Any] = False
for divisor in range(2 , int(round(sqrt(lowerCAmelCase_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowerCAmelCase_ : Any = False
break
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'status' must been from type bool"
return status
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowerCAmelCase_ : Optional[Any] = list(range(2 , n + 1 ) )
lowerCAmelCase_ : List[Any] = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCAmelCase_ ) ):
for j in range(i + 1 , len(lowerCAmelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowerCAmelCase_ : Tuple = 0
# filters actual prime numbers.
lowerCAmelCase_ : List[str] = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2"
lowerCAmelCase_ : List[str] = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCAmelCase_ ):
ans.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def prime_factorization(number: int) -> list:
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"
    ans = []  # this list will be returned by the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' is not prime then build the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"
    return ans
def greatest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"
    return ans
def smallest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"
    return ans
def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be of type bool"
    return number % 2 == 0
def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be of type bool"
    return number % 2 != 0
def goldbach(number: int) -> list:
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variables for the while-loops.
    i = 0
    j = None
    # exit variable, used to break out of the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"
    return ans
def gcd(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be of type int and positive"
    return number1
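The loop above is the iterative Euclidean algorithm, so a short trace doubles as a test:

```python
# 48 % 18 = 12, 18 % 12 = 6, 12 % 6 = 0  ->  gcd is 6
assert gcd(48, 18) == 6
assert gcd(18, 48) == 6  # argument order does not matter
assert gcd(7, 0) == 7    # gcd(n, 0) is n
```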
def kg_v(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."
    ans = 1  # actual answer that will be returned.
    # for kgV(x, 1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captures numbers in both 'prime_fac_1' and 'prime_fac_2'
    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be of type int and positive"
    return ans
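For positive integers, gcd(a, b) * lcm(a, b) == a * b, which gives a cheap consistency check across the two functions above:

```python
a, b = 12, 18
assert kg_v(a, b) == 36
assert gcd(a, b) * kg_v(a, b) == a * b  # 6 * 36 == 216
```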
def get_prime(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must be a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans is not prime then
        # run to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and of type int"
    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"
    # 'ans' contains neither 'p_number_1' nor 'p_number_2'!
    return ans
def get_divisors(n: int) -> list:
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"
    return ans
def is_perfect_number(number: int) -> bool:
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and > 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"
    # sum all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be of type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
return ans | 619 | 1 |
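With the seeds restored above, `fib(0)` and `fib(1)` both return 1, i.e. the function returns the (n+1)-th Fibonacci number:

```python
assert [fib(n) for n in range(7)] == [1, 1, 2, 3, 5, 8, 13]
```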
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info(f"Converting model {model_name}...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path) | 619 |
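When run as a script, argparse wires the three arguments into `convert_poolformer_checkpoint`; called programmatically it looks like this (a sketch — checkpoint path and output folder are placeholder values):

```python
convert_poolformer_checkpoint(
    model_name="poolformer_s12",
    checkpoint_path="/path/to/poolformer_s12.pth.tar",  # placeholder path
    pytorch_dump_folder_path="./poolformer_s12_hf",     # placeholder path
)
```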
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_UpperCAmelCase : Tuple =10
def lin_search(left: int, right: int, array: list, target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list, target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list, target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at position: {result1}")
        print(f"Recursive search: {target} found at position: {result2}")
    else:
print("""Not found""") | 619 | 1 |
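A quick non-interactive sanity check of both variants (sorted input is required; `precision` is the module-level constant defined above):

```python
data = list(range(0, 100, 3))  # 0, 3, 6, ..., 99
assert ite_ternary_search(data, 42) == data.index(42)
assert rec_ternary_search(0, len(data) - 1, data, 42) == data.index(42)
assert ite_ternary_search(data, 43) == -1  # absent values return -1
```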
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f"""{solution() = }""") | 619 |
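`1406357289` is the pandigital number with this substring-divisibility property quoted in the Project Euler problem statement, which makes a handy spot check:

```python
assert is_substring_divisible(tuple(int(d) for d in "1406357289"))
```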
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 619 | 1 |
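`_LazyModule` replaces the package module so that the heavy submodules above are only imported on first attribute access. The effect can be sketched with PEP 562's module-level `__getattr__` (a simplified illustration with a hypothetical mapping, not the transformers implementation):

```python
import importlib

# maps public attribute -> submodule that actually defines it (hypothetical example)
_lazy_targets = {"LlamaConfig": ".configuration_llama", "LlamaModel": ".modeling_llama"}

def __getattr__(name):
    # only runs when normal attribute lookup fails, so imports happen on demand
    if name in _lazy_targets:
        module = importlib.import_module(_lazy_targets[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```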
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 619 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def num_embed(self):
        return 12
    @property
    def num_embeds_ada_norm(self):
        return 12
    @property
    def text_embedder_hidden_size(self):
        return 32
@property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)
@property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )
        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool", num_images_per_prompt=1, generator=generator, output_type="np"
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0 | 619 | 1 |
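Both fast tests above reseed a fresh `torch.Generator` before each pipeline call so the two invocations draw identical noise; the underlying guarantee is simply that equal seeds on the same device produce identical sample streams:

```python
import torch

gen_a = torch.Generator(device="cpu").manual_seed(0)
gen_b = torch.Generator(device="cpu").manual_seed(0)
# identical seeds -> identical pseudo-random draws, which is what lets the
# tests compare pipeline outputs against hard-coded expected slices
assert torch.equal(torch.randn(4, generator=gen_a), torch.randn(4, generator=gen_b))
```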
from manim import *
class CheckpointLoadingScene(Scene):  # hypothetical scene name; manim animations subclass Scene
    def construct(self):
lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 )
lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowercase )
lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 )
lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
gpu.move_to([-1, -1, 0] )
self.add(__lowercase )
lowerCAmelCase_ : str = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 )
lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
model.move_to([3, -1.0, 0] )
self.add(__lowercase )
lowerCAmelCase_ : int = []
lowerCAmelCase_ : int = []
lowerCAmelCase_ : Dict = []
for i, rect in enumerate(__lowercase ):
rect.set_stroke(__lowercase )
lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
self.add(__lowercase )
model_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase , *__lowercase )
lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 )
lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__lowercase )
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Dict = []
for i, rect in enumerate(__lowercase ):
lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 )
target.move_to(__lowercase )
ckpt_arr.append(__lowercase )
lowerCAmelCase_ : Union[str, Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase )
lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase_ : str = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowercase , __lowercase )
lowerCAmelCase_ : str = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowercase )
lowerCAmelCase_ : str = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 )
lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
lowerCAmelCase_ : int = []
for i, rect in enumerate(__lowercase ):
lowerCAmelCase_ : int = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
self.play(*__lowercase )
self.play(FadeOut(__lowercase ) )
lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase , run_time=3 ) )
self.play(
FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
self.wait() | 619 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
SPIECE_UNDERLINE = "▁"
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,) | 619 | 1 |
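The segment layout built by `create_token_type_ids_from_sequences` above is easy to verify by hand, since XLNet appends `<cls>` at the end with its own segment id (2), which is also why `padding_side` is `"left"`:

```python
# pair of sequences: A has 2 tokens, B has 3 tokens
len_a, len_b = 2, 3
# A + <sep> -> segment 0; B + <sep> -> segment 1; trailing <cls> -> segment 2
type_ids = (len_a + 1) * [0] + (len_b + 1) * [1] + [2]
assert type_ids == [0, 0, 0, 1, 1, 1, 1, 2]
```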
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""ernie-m-base""": 514,
"""ernie-m-large""": 514,
}
PRETRAINED_INIT_CONFIGURATION = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs=None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)
    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def clean_text(self, content):
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in content))
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        # called when `add_special_tokens` is True, so align with the `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
return (vocab_file,) | 619 |
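`load_vocab` above assigns each token the index of its line in the vocab file; a tiny standalone round trip of the same logic (temporary file, hypothetical tokens):

```python
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "vocab.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write("[PAD]\n[UNK]\nhello\n")
    # standalone re-implementation of the same one-token-per-line mapping
    vocab = {line.rstrip("\n"): i for i, line in enumerate(open(path, encoding="utf-8"))}
    assert vocab == {"[PAD]": 0, "[UNK]": 1, "hello": 2}
```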
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""") | 619 | 1 |
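With classical 0/1 inputs the circuit is deterministic, so every shot collapses to the same two-bit string (carry bit first, then sum bit). A sketch assuming a local Aer simulator is installed:

```python
# 1 + 0 + 1 = 2 -> sum bit 0, carry bit 1, read out as the string "10"
counts = quantum_full_adder(1, 0, 1)
print(counts)  # expected: {'10': 1000}
```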
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"
    def __init__(self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache | 619 |
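Like any `PretrainedConfig` subclass, the class above is a container of defaults that can be overridden per keyword; a minimal sketch assuming the class as restored:

```python
config = NezhaConfig(num_hidden_layers=6, hidden_dropout_prob=0.2)
print(config.hidden_size)        # 768 (default kept)
print(config.num_hidden_layers)  # 6   (overridden)
```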
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("""+918827897895""")) | 619 | 1 |
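A few quick checks of what the pattern accepts and rejects, assuming the function as defined above:

```python
for number in ["+918827897895", "9876543210", "08827897895", "12345"]:
    print(number, indian_phone_validator(number))
# expected: True, True, True, False
```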
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker=True):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker,
        )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def components(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}
    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
@torch.no_grad()
    def text2img_sd1_2( self , prompt , height = 5_1_2 , width = 5_1_2 , num_inference_steps = 5_0 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def text2img_sd1_3( self , prompt , height = 5_1_2 , width = 5_1_2 , num_inference_steps = 5_0 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def text2img_sd1_4( self , prompt , height = 5_1_2 , width = 5_1_2 , num_inference_steps = 5_0 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def __call__( self , prompt , height = 5_1_2 , width = 5_1_2 , num_inference_steps = 5_0 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] ) | 619 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCAmelCase : Any =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ["""pixel_values"""]
def __init__( self , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 2_5_5 , __lowercase = True , __lowercase = None , __lowercase = None , **__lowercase , ) -> None:
super().__init__(**__lowercase )
lowerCAmelCase_ : Dict = size if size is not None else {'''shortest_edge''': 3_8_4}
lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
lowerCAmelCase_ : List[Any] = do_resize
lowerCAmelCase_ : Optional[int] = size
# Default value set here for backwards compatibility where the value in config is None
lowerCAmelCase_ : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowerCAmelCase_ : Tuple = resample
lowerCAmelCase_ : Optional[int] = do_rescale
lowerCAmelCase_ : Any = rescale_factor
lowerCAmelCase_ : List[str] = do_normalize
lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ) -> np.ndarray:
lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
if "shortest_edge" not in size:
raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
lowerCAmelCase_ : Optional[int] = size['''shortest_edge''']
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCAmelCase_ : Optional[Any] = int(shortest_edge / crop_pct )
lowerCAmelCase_ : List[str] = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase )
lowerCAmelCase_ : List[Any] = resize(image=__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__lowercase , size=(shortest_edge, shortest_edge) , data_format=__lowercase , **__lowercase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__lowercase , size=(shortest_edge, shortest_edge) , resample=__lowercase , data_format=__lowercase , **__lowercase )
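    # ConvNeXt-style resize (above): for shortest_edge < 384 the image is
    # resized so that shortest_edge/crop_pct becomes the short side and then
    # center-cropped to (shortest_edge, shortest_edge); at 384 or larger the
    # image is warped directly to (shortest_edge, shortest_edge), no cropping.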
def lowercase_ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> Any:
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image:
lowerCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ : Any = crop_pct if crop_pct is not None else self.crop_pct
lowerCAmelCase_ : str = resample if resample is not None else self.resample
lowerCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ : int = image_std if image_std is not None else self.image_std
lowerCAmelCase_ : int = size if size is not None else self.size
lowerCAmelCase_ : List[str] = get_size_dict(__lowercase , default_to_square=__lowercase )
lowerCAmelCase_ : Tuple = make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowerCAmelCase_ : Optional[Any] = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
lowerCAmelCase_ : Union[str, Any] = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images]
if do_rescale:
lowerCAmelCase_ : Any = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
lowerCAmelCase_ : List[Any] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
lowerCAmelCase_ : Optional[Any] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
lowerCAmelCase_ : Dict = {'''pixel_values''': images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase ) | 619 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : str =logging.get_logger(__name__)
class EncoderDecoderConfig( PretrainedConfig ):
    '''Composes an encoder config and a decoder config into one encoder-decoder configuration.'''
    model_type = """encoder-decoder"""
    is_composition = True
    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
@classmethod
    def from_encoder_decoder_configs( cls , encoder_config , decoder_config , **kwargs ) -> PretrainedConfig:
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
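    # Sketch: compose two existing configs (variable names illustrative):
    #   config = EncoderDecoderConfig.from_encoder_decoder_configs(bert_config, gpt2_config)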
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output | 619 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[int] =logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] ={
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class GPTNeoXJapaneseConfig( PretrainedConfig ):
    '''Configuration class for GPT-NeoX Japanese models.'''
    model_type = """gpt_neox_japanese"""
    def __init__( self , vocab_size=3_2_0_0_0 , hidden_size=2_5_6_0 , num_hidden_layers=3_2 , num_attention_heads=3_2 , intermediate_multiple_size=4 , hidden_act="gelu" , rotary_pct=1.00 , rotary_emb_base=1_0_0_0_0 , max_position_embeddings=2_0_4_8 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=3_1_9_9_6 , eos_token_id=3_1_9_9_9 , attention_dropout=0.1 , hidden_dropout=0.0 , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout | 619 | 1 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class snake_case__( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase_ ( self ) -> List[str]:
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(__lowercase ):
lowerCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Any = FlaxAutoModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def lowercase_ ( self ) -> List[str]:
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(__lowercase ):
lowerCAmelCase_ : Tuple = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
lowerCAmelCase_ : Union[str, Any] = FlaxAutoModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def lowercase_ ( self ) -> List[str]:
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(__lowercase )
lowerCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(__lowercase )
lowerCAmelCase_ : Optional[int] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__lowercase ):
return model(**__lowercase )
eval(**__lowercase ).block_until_ready()
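        # jax dispatches asynchronously, so block_until_ready() forces the
        # jitted forward pass to actually run before the test returns.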
@slow
def lowercase_ ( self ) -> List[Any]:
for model_name in ["roberta-base", "roberta-large"]:
lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(__lowercase )
lowerCAmelCase_ : List[str] = FlaxRobertaModel.from_pretrained(__lowercase )
lowerCAmelCase_ : List[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__lowercase ):
return model(**__lowercase )
eval(**__lowercase ).block_until_ready()
def lowercase_ ( self ) -> Union[str, Any]:
with self.assertRaisesRegex(
__lowercase , '''bert-base is not a local folder and is not a valid model identifier''' ):
lowerCAmelCase_ : Optional[int] = FlaxAutoModel.from_pretrained('''bert-base''' )
def lowercase_ ( self ) -> List[str]:
with self.assertRaisesRegex(
__lowercase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            lowerCAmelCase_ : Any = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='''aaaaaa''' )
def lowercase_ ( self ) -> Tuple:
with self.assertRaisesRegex(
__lowercase , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ):
lowerCAmelCase_ : List[str] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowercase_ ( self ) -> Tuple:
with self.assertRaisesRegex(__lowercase , '''Use `from_pt=True` to load this model''' ):
lowerCAmelCase_ : Tuple = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' ) | 619 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=1_0 , __lowercase=3 , __lowercase=3_2 * 4 , __lowercase=3_2 * 6 , __lowercase=4 , __lowercase=3_2 , ) -> Union[str, Any]:
lowerCAmelCase_ : str = parent
lowerCAmelCase_ : Optional[Any] = batch_size
lowerCAmelCase_ : List[Any] = is_training
lowerCAmelCase_ : Optional[Any] = use_auxiliary_loss
lowerCAmelCase_ : List[Any] = num_queries
lowerCAmelCase_ : str = num_channels
lowerCAmelCase_ : Dict = min_size
lowerCAmelCase_ : List[str] = max_size
lowerCAmelCase_ : Any = num_labels
lowerCAmelCase_ : str = mask_feature_size
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowercase )
lowerCAmelCase_ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase )
lowerCAmelCase_ : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5
).float()
lowerCAmelCase_ : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long()
lowerCAmelCase_ : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase_ ( self ) -> List[str]:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = self.prepare_config_and_inputs()
lowerCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowercase_ ( self , __lowercase , __lowercase ) -> Any:
lowerCAmelCase_ : Optional[int] = output.encoder_hidden_states
lowerCAmelCase_ : List[Any] = output.pixel_decoder_hidden_states
lowerCAmelCase_ : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowercase ) , config.decoder_config.decoder_layers )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> int:
with torch.no_grad():
lowerCAmelCase_ : List[Any] = MaskFormerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase )
lowerCAmelCase_ : Optional[int] = model(__lowercase , output_hidden_states=__lowercase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowercase , __lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
lowerCAmelCase_ : Any = MaskFormerForInstanceSegmentation(config=__lowercase )
model.to(__lowercase )
model.eval()
def comm_check_on_output(__lowercase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCAmelCase_ : int = model(pixel_values=__lowercase , pixel_mask=__lowercase )
lowerCAmelCase_ : Any = model(__lowercase )
comm_check_on_output(__lowercase )
lowerCAmelCase_ : List[Any] = model(
pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
comm_check_on_output(__lowercase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Tuple = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : List[str] = False
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Any = MaskFormerModelTester(self )
lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def lowercase_ ( self ) -> Any:
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def lowercase_ ( self ) -> str:
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def lowercase_ ( self ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase_ ( self ) -> Dict:
pass
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Tuple = model_class(__lowercase )
lowerCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : str = [*signature.parameters.keys()]
lowerCAmelCase_ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
@slow
def lowercase_ ( self ) -> Optional[int]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowerCAmelCase_ : str = MaskFormerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Tuple = (self.model_tester.min_size,) * 2
lowerCAmelCase_ : List[Any] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__lowercase ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowercase ),
'''class_labels''': torch.zeros(2 , 1_0 , device=__lowercase ).long(),
}
lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase )
lowerCAmelCase_ : Dict = model(**__lowercase )
self.assertTrue(outputs.loss is not None )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )
def lowercase_ ( self ) -> int:
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : List[str] = model_class(__lowercase ).to(__lowercase )
lowerCAmelCase_ : int = model(**__lowercase , output_attentions=__lowercase )
self.assertTrue(outputs.attentions is not None )
def lowercase_ ( self ) -> List[str]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase_ : int = self.all_model_classes[1]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ : Optional[Any] = model_class(__lowercase )
model.to(__lowercase )
model.train()
lowerCAmelCase_ : Optional[Any] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss
loss.backward()
def lowercase_ ( self ) -> Optional[int]:
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase_ : Any = self.all_model_classes[1]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ : Tuple = True
lowerCAmelCase_ : Tuple = True
lowerCAmelCase_ : Any = model_class(__lowercase )
model.to(__lowercase )
model.train()
lowerCAmelCase_ : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we keep requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
lowerCAmelCase_ : str = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase_ : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowercase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
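        # retain_grad() keeps gradients on these non-leaf tensors so the
        # asserts above can check that each intermediate received a gradient.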
_UpperCAmelCase : Dict =1E-4
def lowerCAmelCase ( )-> Any:
lowerCAmelCase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class snake_case__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase_ ( self ) -> Union[str, Any]:
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase )
lowerCAmelCase_ : Dict = self.default_image_processor
lowerCAmelCase_ : int = prepare_img()
lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCAmelCase_ : List[str] = model(**__lowercase )
lowerCAmelCase_ : Union[str, Any] = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
lowerCAmelCase_ : List[Any] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
lowerCAmelCase_ : int = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowercase )
.eval()
)
lowerCAmelCase_ : Tuple = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCAmelCase_ : Dict = model(**__lowercase )
# masks_queries_logits
lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCAmelCase_ : Tuple = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
# class_queries_logits
lowerCAmelCase_ : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase_ : Dict = torch.tensor(
[
[1.6_512e00, -5.2_572e00, -3.3_519e00],
[3.6_169e-02, -5.9_025e00, -2.9_313e00],
[1.0_766e-04, -7.7_630e00, -5.1_263e00],
] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : str = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__lowercase )
.eval()
)
lowerCAmelCase_ : int = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCAmelCase_ : str = model(**__lowercase )
# masks_queries_logits
lowerCAmelCase_ : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
# class_queries_logits
lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase_ : int = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowercase )
.eval()
)
lowerCAmelCase_ : str = self.default_image_processor
lowerCAmelCase_ : Union[str, Any] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase )
lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']]
lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']]
with torch.no_grad():
lowerCAmelCase_ : str = model(**__lowercase )
self.assertTrue(outputs.loss is not None ) | 619 | 1 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
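# FlaxRoFormerModelTester above supplies (config, inputs_dict) pairs; the
# mixin-based test class below reuses them across the shared Flax tests.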
@require_flax
class FlaxRoFormerModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        self.model_tester = FlaxRoFormerModelTester(self )
@slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxRoFormerModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
@slow
    def test_inference_masked_lm( self ):
        model = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        vocab_size = 5_0_0_0_0
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = jnp.array(
            [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) ) | 619 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class AutomaticSpeechRecognition( TaskTemplate ):
    '''Task template for automatic speech recognition datasets.'''
    task: str = field(default="""automatic-speech-recognition""", metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema: ClassVar[Features] = Features({"""audio""": Audio()} )
    label_schema: ClassVar[Features] = Features({"""transcription""": Value("""string""" )} )
    audio_column: str = "audio"
    transcription_column: str = "transcription"
    def align_with_features( self , features ):
        if self.audio_column not in features:
            raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema["""audio"""] = features[self.audio_column]
        task_template.__dict__["""input_schema"""] = input_schema
        return task_template
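    # Sketch of aligning the template with a dataset (column names illustrative):
    #   task = AutomaticSpeechRecognition(audio_column="file", transcription_column="text")
    #   task = task.align_with_features(dataset.features)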
    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"} | 619 | 1 |
from math import sqrt
def is_prime(number: int ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(number ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool ), "'status' must been from type bool"
    return status
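# Examples (per the definition above): is_prime(2) -> True, is_prime(4) -> False,
# is_prime(0) -> False, is_prime(97) -> True.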
def sieve_er(n: int ) -> list:
    assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2 , n + 1 ) )
    ans = []  # this list will be returns.
    # actual sieve of Eratosthenes
    for i in range(len(begin_list ) ):
        for j in range(i + 1 , len(begin_list ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def get_prime_numbers(n: int ) -> list:
    assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2 , n + 1 ):
        if is_prime(number ):
            ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def prime_factorization(number: int ) -> list:
    assert isinstance(number , int ) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number ):
        while quotient != 1:
            if is_prime(factor ) and (quotient % factor == 0):
                ans.append(factor )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number: int ) -> int:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = max(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number: int ) -> int:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = min(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def is_even(number: int ) -> bool:
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , bool ), "compare must been from type bool"
    return number % 2 == 0
def is_odd(number: int ) -> bool:
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , bool ), "compare must been from type bool"
    return number % 2 != 0
def goldbach(number: int ) -> list:
    assert (
        isinstance(number , int ) and (number > 2) and is_even(number )
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number )
    len_pn = len(prime_numbers )
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans , list )
        and (len(ans ) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0] )
        and is_prime(ans[1] )
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(number1: int , number2: int ) -> int:
    assert (
        isinstance(number1 , int )
        and isinstance(number2 , int )
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1 , int ) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
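# Example: gcd(24, 36) -> 12 (the Euclidean algorithm above).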
def kg_v(number1: int , number2: int ) -> int:
    assert (
        isinstance(number1 , int )
        and isinstance(number2 , int )
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1 )
        prime_fac_2 = prime_factorization(number2 )
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1 , number2 )
    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n )
                count2 = prime_fac_2.count(n )
                for _ in range(max(count1 , count2 ) ):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n )
                for _ in range(count1 ):
                    ans *= n
            done.append(n )
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n )
            for _ in range(count2 ):
                ans *= n
            done.append(n )
    # precondition
    assert isinstance(ans , int ) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime(n: int ) -> int:
    assert isinstance(n , int ) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans ):
            ans += 1
    # precondition
    assert isinstance(ans , int ) and is_prime(
        ans ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_1: int , p_number_2: int ) -> list:
    assert (
        is_prime(p_number_1 ) and is_prime(p_number_2 ) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number ):
        number += 1
    while number < p_number_2:
        ans.append(number )
        number += 1
        # fetch the next prime number.
        while not is_prime(number ):
            number += 1
    # precondition
    assert (
        isinstance(ans , list )
        and ans[0] != p_number_1
        and ans[len(ans ) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int ) -> list:
    assert isinstance(n , int ) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1 , n + 1 ):
        if n % divisor == 0:
            ans.append(divisor )
    # precondition
    assert ans[0] == 1 and ans[len(ans ) - 1] == n, "Error in function getDivisors(...)"
    return ans
def is_perfect_number(number: int ) -> bool:
    assert isinstance(number , int ) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number )
    # precondition
    assert (
        isinstance(divisors , list )
        and (divisors[0] == 1)
        and (divisors[len(divisors ) - 1] == number)
    ), "Error in help-function getDivisors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1] ) == number
def simplify_fraction(numerator: int , denominator: int ) -> tuple:
    assert (
        isinstance(numerator , int )
        and isinstance(denominator , int )
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator ) , abs(denominator ) )
    # precondition
    assert (
        isinstance(gcd_of_fraction , int )
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int ) -> int:
    assert isinstance(n , int ) and (n >= 0), "'n' must been an int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1 , n + 1 ):
        ans *= factor
    return ans
def fib(n: int ) -> int:
    assert isinstance(n , int ) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1 ):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans | 619 |
_UpperCAmelCase : int =frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
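# Frozensets like these are the canonical parameter groups used by diffusers'
# pipeline tests: a set of accepted call arguments followed by the smaller
# set of arguments that can be batched.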
_UpperCAmelCase : List[Any] =frozenset(["""prompt""", """negative_prompt"""])
_UpperCAmelCase : Dict =frozenset([])
_UpperCAmelCase : int =frozenset(["""image"""])
_UpperCAmelCase : Tuple =frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
_UpperCAmelCase : int =frozenset(["""image"""])
_UpperCAmelCase : str =frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
_UpperCAmelCase : int =frozenset(["""prompt""", """image""", """negative_prompt"""])
_UpperCAmelCase : Optional[int] =frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
_UpperCAmelCase : Optional[int] =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_UpperCAmelCase : Optional[Any] =frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
_UpperCAmelCase : Optional[Any] =frozenset(["""image""", """mask_image"""])
_UpperCAmelCase : Union[str, Any] =frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
_UpperCAmelCase : Tuple =frozenset(["""example_image""", """image""", """mask_image"""])
_UpperCAmelCase : Any =frozenset(["""class_labels"""])
_UpperCAmelCase : List[Any] =frozenset(["""class_labels"""])
_UpperCAmelCase : int =frozenset(["""batch_size"""])
_UpperCAmelCase : str =frozenset([])
_UpperCAmelCase : str =frozenset(["""batch_size"""])
_UpperCAmelCase : Optional[Any] =frozenset([])
_UpperCAmelCase : Tuple =frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
_UpperCAmelCase : Tuple =frozenset(["""prompt""", """negative_prompt"""])
_UpperCAmelCase : List[str] =frozenset(["""input_tokens"""])
_UpperCAmelCase : Optional[Any] =frozenset(["""input_tokens"""]) | 619 | 1 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : Optional[Any] =logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str , pytorch_dump_folder_path: str )-> None:
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo , architectures=['''RobertaPreLayerNormForMaskedLM'''] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo , filename='''pytorch_model.bin''' ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('''roberta.''' ):
            tensor_key = '''roberta_prelayernorm.''' + tensor_key[len('''roberta.''' ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('''.self.LayerNorm.weight''' ) or tensor_key.endswith('''.self.LayerNorm.bias''' ):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None , config=config , state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_UpperCAmelCase : Any =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_UpperCAmelCase : str =parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 619 |
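The conversion above reduces to two string rules over the checkpoint keys: re-prefix every `roberta.` key as `roberta_prelayernorm.` and drop the unused self-attention LayerNorm weights. A minimal sketch of just those rules on a toy state dict (the keys and values here are made up for illustration):

original_state_dict = {
    "roberta.encoder.layer.0.attention.self.query.weight": "q",
    "roberta.encoder.layer.0.attention.self.LayerNorm.weight": "unused",
    "lm_head.dense.weight": "head",
}

new_state_dict = {}
for tensor_key, tensor_value in original_state_dict.items():
    if tensor_key.startswith("roberta."):
        tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]
    # Unused LayerNorm weights inside self-attention are skipped entirely.
    if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
        continue
    new_state_dict[tensor_key] = tensor_value

print(sorted(new_state_dict))
# ['lm_head.dense.weight', 'roberta_prelayernorm.encoder.layer.0.attention.self.query.weight']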
def lowerCAmelCase ( lowerCAmelCase_ = 1_000_000 )-> int:
lowerCAmelCase_ : Dict = 1
lowerCAmelCase_ : List[Any] = 1
lowerCAmelCase_ : Optional[Any] = {1: 1}
for inputa in range(2 , lowerCAmelCase_ ):
lowerCAmelCase_ : Tuple = 0
lowerCAmelCase_ : Dict = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
lowerCAmelCase_ : Any = (3 * number) + 1
counter += 1
if inputa not in counters:
lowerCAmelCase_ : Tuple = counter
if counter > pre_counter:
lowerCAmelCase_ : Optional[int] = inputa
lowerCAmelCase_ : Union[str, Any] = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip()))) | 619 | 1 |
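The snippet above is the classic memoized search for the longest Collatz chain below one million (Project Euler 14): chain lengths for already-seen starting numbers are cached in a dict, so each sequence is only walked until it hits a known value. A cleaned-up sketch of the same idea, with readable names chosen for illustration:

def longest_collatz_chain(limit: int = 1_000_000) -> int:
    counters = {1: 1}  # chain-length cache; the chain starting at 1 has length 1
    largest_number, pre_counter = 1, 1
    for start in range(2, limit):
        counter = 0
        number = start
        while number not in counters:
            if number % 2 == 0:
                number //= 2
            else:
                number = 3 * number + 1
            counter += 1
        counter += counters[number]
        counters[start] = counter
        if counter > pre_counter:
            largest_number, pre_counter = start, counter
    return largest_number

print(longest_collatz_chain())  # 837799, the known answer for Project Euler 14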
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : Optional[Any] =logging.get_logger(__name__)
_UpperCAmelCase : List[str] ={
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = """xlm"""
SCREAMING_SNAKE_CASE__ : List[str] = {
"""hidden_size""": """emb_dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
"""n_words""": """vocab_size""", # For backward compatibility
}
def __init__( self , __lowercase=3_0_1_4_5 , __lowercase=2_0_4_8 , __lowercase=1_2 , __lowercase=1_6 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=True , __lowercase=False , __lowercase=False , __lowercase=False , __lowercase=1 , __lowercase=True , __lowercase=5_1_2 , __lowercase=2_0_4_8**-0.5 , __lowercase=1e-12 , __lowercase=0.02 , __lowercase=0 , __lowercase=1 , __lowercase=2 , __lowercase=3 , __lowercase=5 , __lowercase=True , __lowercase="first" , __lowercase=True , __lowercase=None , __lowercase=True , __lowercase=0.1 , __lowercase=5 , __lowercase=5 , __lowercase=0 , __lowercase=0 , __lowercase=2 , __lowercase=0 , **__lowercase , ) -> Any:
lowerCAmelCase_ : List[Any] = vocab_size
lowerCAmelCase_ : List[Any] = emb_dim
lowerCAmelCase_ : Dict = n_layers
lowerCAmelCase_ : int = n_heads
lowerCAmelCase_ : str = dropout
lowerCAmelCase_ : Optional[int] = attention_dropout
lowerCAmelCase_ : str = gelu_activation
lowerCAmelCase_ : Optional[Any] = sinusoidal_embeddings
lowerCAmelCase_ : Optional[Any] = causal
lowerCAmelCase_ : List[Any] = asm
lowerCAmelCase_ : Dict = n_langs
lowerCAmelCase_ : Dict = use_lang_emb
lowerCAmelCase_ : int = layer_norm_eps
lowerCAmelCase_ : Optional[Any] = bos_index
lowerCAmelCase_ : str = eos_index
lowerCAmelCase_ : str = pad_index
lowerCAmelCase_ : Optional[int] = unk_index
lowerCAmelCase_ : str = mask_index
lowerCAmelCase_ : Tuple = is_encoder
lowerCAmelCase_ : List[str] = max_position_embeddings
lowerCAmelCase_ : List[Any] = embed_init_std
lowerCAmelCase_ : Any = init_std
lowerCAmelCase_ : List[Any] = summary_type
lowerCAmelCase_ : Optional[Any] = summary_use_proj
lowerCAmelCase_ : Tuple = summary_activation
lowerCAmelCase_ : int = summary_proj_to_labels
lowerCAmelCase_ : Union[str, Any] = summary_first_dropout
lowerCAmelCase_ : Dict = start_n_top
lowerCAmelCase_ : str = end_n_top
lowerCAmelCase_ : Optional[Any] = mask_token_id
lowerCAmelCase_ : int = lang_id
if "n_words" in kwargs:
lowerCAmelCase_ : Any = kwargs['''n_words''']
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , **__lowercase )
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase_ : Union[str, Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCAmelCase_ : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] ) | 619 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : str =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = """encoder-decoder"""
SCREAMING_SNAKE_CASE__ : str = True
def __init__( self , **__lowercase ) -> Union[str, Any]:
super().__init__(**__lowercase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
lowerCAmelCase_ : str = kwargs.pop('''encoder''' )
lowerCAmelCase_ : int = encoder_config.pop('''model_type''' )
lowerCAmelCase_ : Optional[Any] = kwargs.pop('''decoder''' )
lowerCAmelCase_ : Optional[Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
lowerCAmelCase_ : Union[str, Any] = AutoConfig.for_model(__lowercase , **__lowercase )
lowerCAmelCase_ : List[str] = AutoConfig.for_model(__lowercase , **__lowercase )
lowerCAmelCase_ : Any = True
@classmethod
def lowercase_ ( cls , __lowercase , __lowercase , **__lowercase ) -> PretrainedConfig:
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
lowerCAmelCase_ : int = True
lowerCAmelCase_ : List[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowercase )
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ : List[str] = self.encoder.to_dict()
lowerCAmelCase_ : Dict = self.decoder.to_dict()
lowerCAmelCase_ : Optional[Any] = self.__class__.model_type
return output | 619 | 1 |
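The config class above wraps an encoder and a decoder sub-config, and its classmethod constructor flips `is_decoder` and `add_cross_attention` on the decoder side before composing. A short usage sketch, assuming the public `from_encoder_decoder_configs` API in transformers:

from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig()
decoder_config = BertConfig()
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)

# The decoder flags were set before the sub-configs were serialized into the composite.
assert config.decoder.is_decoder and config.decoder.add_cross_attention
print(config.to_dict()["decoder"]["model_type"])  # bert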
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Dict:
lowerCAmelCase_ : List[str] = tmp_path_factory.mktemp('''dset_infos_dir''' )
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''''' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
f.write('''{"default": {"dataset_size": 42}}''' )
lowerCAmelCase_ : Tuple = DatasetInfosDict.from_directory(lowerCAmelCase_ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
lowerCAmelCase_ : Tuple = str(lowerCAmelCase_ )
dataset_info.write_to_directory(lowerCAmelCase_ )
lowerCAmelCase_ : List[str] = DatasetInfo.from_directory(lowerCAmelCase_ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(lowerCAmelCase_ , '''dataset_info.json''' ) )
def lowerCAmelCase ( )-> Any:
lowerCAmelCase_ : Tuple = DatasetInfo(
description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=1_337 , post_processing_size=442 , dataset_size=1_234 , size_in_bytes=1_337 + 442 + 1_234 , )
lowerCAmelCase_ : Any = dataset_info._to_yaml_dict()
assert sorted(lowerCAmelCase_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
lowerCAmelCase_ : Tuple = yaml.safe_dump(lowerCAmelCase_ )
lowerCAmelCase_ : int = yaml.safe_load(lowerCAmelCase_ )
assert dataset_info_yaml_dict == reloaded
def lowerCAmelCase ( )-> List[Any]:
lowerCAmelCase_ : Optional[Any] = DatasetInfo()
lowerCAmelCase_ : int = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=1_337 ),
} ),
] , )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]:
lowerCAmelCase_ : List[str] = str(lowerCAmelCase_ )
dataset_infos_dict.write_to_directory(lowerCAmelCase_ )
lowerCAmelCase_ : List[Any] = DatasetInfosDict.from_directory(lowerCAmelCase_ )
# the config_name of the dataset_infos_dict takes over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
lowerCAmelCase_ : List[str] = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
lowerCAmelCase_ : str = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(lowerCAmelCase_ , '''README.md''' ) ) | 619 |
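The tests above hinge on a dump/load round trip through `yaml.safe_dump` and `yaml.safe_load`. The same invariant on a plain dict standing in for a `_to_yaml_dict()` result:

import yaml

dataset_info_yaml_dict = {"config_name": "default", "dataset_size": 42}
dumped = yaml.safe_dump(dataset_info_yaml_dict)
assert yaml.safe_load(dumped) == dataset_info_yaml_dict
print(dumped.strip())
# config_name: default
# dataset_size: 42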
from __future__ import annotations
from math import pi
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 | 1 |
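The function above solves X_L = 2*pi*f*L for whichever of the three quantities is passed as 0. A worked example for a 35 mH inductor at 50 Hz:

from math import pi

inductance, frequency = 35e-3, 50
reactance = 2 * pi * frequency * inductance
print(f"{reactance:.2f} ohm")  # 11.00 ohm

# Inverting the same relation recovers the inductance from the reactance:
print(round(reactance / (2 * pi * frequency), 3))  # 0.035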
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : List[str] ={
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[Any] =["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple =[
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple =[
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] =[
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : List[str] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 619 |
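`_LazyModule` defers the heavy submodule imports above until an attribute is first accessed. A generic sketch of the underlying pattern using PEP 562's module-level `__getattr__` (this is not the transformers implementation; the stand-in modules are chosen purely for illustration):

import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Invoked only for names not found normally; imports the owning module on demand.
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(name)

Saved as a module and imported elsewhere, `module.sqrt(2.0)` triggers the `import math` only on first access.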
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = """linear"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """cosine"""
SCREAMING_SNAKE_CASE__ : Dict = """cosine_with_restarts"""
SCREAMING_SNAKE_CASE__ : List[str] = """polynomial"""
SCREAMING_SNAKE_CASE__ : Dict = """constant"""
SCREAMING_SNAKE_CASE__ : List[str] = """constant_with_warmup"""
SCREAMING_SNAKE_CASE__ : str = """piecewise_constant"""
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ = -1 )-> Tuple:
return LambdaLR(lowerCAmelCase_ , lambda lowerCAmelCase_ : 1 , last_epoch=lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = -1 )-> List[Any]:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1.0 , lowerCAmelCase_ ) )
return 1.0
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = -1 )-> int:
lowerCAmelCase_ : Optional[int] = {}
lowerCAmelCase_ : Union[str, Any] = step_rules.split(''',''' )
for rule_str in rule_list[:-1]:
lowerCAmelCase_ , lowerCAmelCase_ : str = rule_str.split(''':''' )
lowerCAmelCase_ : int = int(lowerCAmelCase_ )
lowerCAmelCase_ : str = float(lowerCAmelCase_ )
lowerCAmelCase_ : List[Any] = value
lowerCAmelCase_ : int = float(rule_list[-1] )
def create_rules_function(lowerCAmelCase_ , lowerCAmelCase_ ):
def rule_func(lowerCAmelCase_ ) -> float:
lowerCAmelCase_ : Tuple = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowerCAmelCase_ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
lowerCAmelCase_ : Tuple = create_rules_function(lowerCAmelCase_ , lowerCAmelCase_ )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=-1 )-> Optional[int]:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0.5 , lowerCAmelCase_ = -1 )-> List[Any]:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
lowerCAmelCase_ : Tuple = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCAmelCase_ ) * 2.0 * progress )) )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = -1 )-> Dict:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
lowerCAmelCase_ : List[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCAmelCase_ ) * progress) % 1.0) )) )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1e-7 , lowerCAmelCase_=1.0 , lowerCAmelCase_=-1 )-> Any:
lowerCAmelCase_ : Dict = optimizer.defaults['''lr''']
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowerCAmelCase_ : List[Any] = lr_init - lr_end
lowerCAmelCase_ : Optional[Any] = num_training_steps - num_warmup_steps
lowerCAmelCase_ : Any = 1 - (current_step - num_warmup_steps) / decay_steps
lowerCAmelCase_ : List[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] ={
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1.0 , lowerCAmelCase_ = -1 , )-> Optional[int]:
lowerCAmelCase_ : Union[str, Any] = SchedulerType(lowerCAmelCase_ )
lowerCAmelCase_ : Optional[int] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCAmelCase_ , step_rules=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , num_cycles=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , power=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ , )
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ ) | 619 | 1 |
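The `step_rules` string parsed above is a comma-separated list of `step:multiplier` pairs followed by a trailing final multiplier. The parsing and lookup extracted into a standalone sketch, so the format is easy to check:

def parse_step_rules(step_rules: str):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        step, value = rule_str.split(":")
        rules_dict[int(step)] = float(value)
    last_lr_multiple = float(rule_list[-1])

    def rule_func(steps: int) -> float:
        sorted_steps = sorted(rules_dict.keys())
        for i, sorted_step in enumerate(sorted_steps):
            if steps < sorted_step:
                return rules_dict[sorted_steps[i]]
        return last_lr_multiple

    return rule_func

rule = parse_step_rules("1000:1.0,2000:0.5,0.1")
print(rule(0), rule(1500), rule(5000))  # 1.0 0.5 0.1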
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[Any]:
return params[f"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="attention" )-> List[Any]:
lowerCAmelCase_ : int = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
lowerCAmelCase_ : Tuple = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowerCAmelCase_ : int = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
lowerCAmelCase_ : Dict = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowerCAmelCase_ : Any = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
lowerCAmelCase_ : Optional[int] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowerCAmelCase_ : int = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
lowerCAmelCase_ : Optional[Any] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False )-> Any:
if split_mlp_wi:
lowerCAmelCase_ : List[Any] = params[f"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
lowerCAmelCase_ : Optional[int] = params[f"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
lowerCAmelCase_ : Optional[Any] = (wi_a, wi_a)
else:
lowerCAmelCase_ : List[str] = params[f"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
lowerCAmelCase_ : int = params[f"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Union[str, Any]:
return params[f"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def lowerCAmelCase ( lowerCAmelCase_ , *, lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False )-> Dict:
lowerCAmelCase_ : Optional[int] = traverse_util.flatten_dict(variables['''target'''] )
lowerCAmelCase_ : List[Any] = {'''/'''.join(lowerCAmelCase_ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowerCAmelCase_ : Dict = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , lowerCAmelCase_ )
lowerCAmelCase_ : Dict = collections.OrderedDict()
# Shared embeddings.
lowerCAmelCase_ : Union[str, Any] = old['''token_embedder/embedding''']
# Encoder.
for i in range(lowerCAmelCase_ ):
# Block i, layer 0 (Self Attention).
lowerCAmelCase_ : Tuple = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''encoder''' , '''pre_attention_layer_norm''' )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Tuple = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''encoder''' , '''attention''' )
lowerCAmelCase_ : Any = layer_norm
lowerCAmelCase_ : int = k.T
lowerCAmelCase_ : Tuple = o.T
lowerCAmelCase_ : Dict = q.T
lowerCAmelCase_ : Union[str, Any] = v.T
# Block i, layer 1 (MLP).
lowerCAmelCase_ : Any = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''encoder''' , '''pre_mlp_layer_norm''' )
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = tax_mlp_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''encoder''' , lowerCAmelCase_ )
lowerCAmelCase_ : Union[str, Any] = layer_norm
if split_mlp_wi:
lowerCAmelCase_ : Optional[Any] = wi[0].T
lowerCAmelCase_ : Optional[int] = wi[1].T
else:
lowerCAmelCase_ : int = wi.T
lowerCAmelCase_ : Dict = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowerCAmelCase_ : Dict = tax_relpos_bias_lookup(
lowerCAmelCase_ , lowerCAmelCase_ , '''encoder''' ).T
lowerCAmelCase_ : Tuple = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
lowerCAmelCase_ : Dict = tax_relpos_bias_lookup(
lowerCAmelCase_ , 0 , '''encoder''' ).T
lowerCAmelCase_ : Any = tax_relpos_bias_lookup(
lowerCAmelCase_ , 0 , '''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(lowerCAmelCase_ ):
# Block i, layer 0 (Self Attention).
lowerCAmelCase_ : Optional[int] = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''decoder''' , '''pre_self_attention_layer_norm''' )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''decoder''' , '''self_attention''' )
lowerCAmelCase_ : Optional[Any] = layer_norm
lowerCAmelCase_ : str = k.T
lowerCAmelCase_ : Optional[Any] = o.T
lowerCAmelCase_ : Optional[Any] = q.T
lowerCAmelCase_ : int = v.T
# Block i, layer 1 (Cross Attention).
lowerCAmelCase_ : Union[str, Any] = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''decoder''' , '''pre_cross_attention_layer_norm''' )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''decoder''' , '''encoder_decoder_attention''' )
lowerCAmelCase_ : Tuple = layer_norm
lowerCAmelCase_ : List[str] = k.T
lowerCAmelCase_ : Dict = o.T
lowerCAmelCase_ : int = q.T
lowerCAmelCase_ : int = v.T
# Block i, layer 2 (MLP).
lowerCAmelCase_ : Any = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''decoder''' , '''pre_mlp_layer_norm''' )
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = tax_mlp_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''decoder''' , lowerCAmelCase_ )
lowerCAmelCase_ : Dict = layer_norm
if split_mlp_wi:
lowerCAmelCase_ : List[str] = wi[0].T
lowerCAmelCase_ : Union[str, Any] = wi[1].T
else:
lowerCAmelCase_ : Union[str, Any] = wi.T
lowerCAmelCase_ : Union[str, Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowerCAmelCase_ : int = tax_relpos_bias_lookup(lowerCAmelCase_ , lowerCAmelCase_ , '''decoder''' ).T
lowerCAmelCase_ : int = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowerCAmelCase_ : Union[str, Any] = old['''decoder/logits_dense/kernel'''].T
return new
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> int:
lowerCAmelCase_ : Dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowerCAmelCase_ : Tuple = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowerCAmelCase_ : Union[str, Any] = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
lowerCAmelCase_ : Tuple = state_dict['''shared.weight''']
return state_dict
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Tuple:
lowerCAmelCase_ : Union[str, Any] = checkpoints.load_tax_checkpoint(lowerCAmelCase_ )
lowerCAmelCase_ : Tuple = convert_tax_to_pytorch(
lowerCAmelCase_ , num_layers=config.num_layers , is_encoder_only=lowerCAmelCase_ , scalable_attention=lowerCAmelCase_ )
lowerCAmelCase_ : Optional[int] = make_state_dict(lowerCAmelCase_ , lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = False , )-> Optional[Any]:
lowerCAmelCase_ : Union[str, Any] = MTaConfig.from_json_file(lowerCAmelCase_ )
print(f"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowerCAmelCase_ : Tuple = UMTaEncoderModel(lowerCAmelCase_ )
else:
lowerCAmelCase_ : Tuple = UMTaForConditionalGeneration(lowerCAmelCase_ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowerCAmelCase_ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCAmelCase_ )
print('''Done''' )
if __name__ == "__main__":
_UpperCAmelCase : Dict =argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
_UpperCAmelCase : Optional[Any] =parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
) | 619 |
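The attention lookup above folds each T5X kernel of shape (d_model, num_heads, head_dim) into the 2-D matrix a PyTorch linear layer expects, then transposes it. The reshape in isolation (shapes are arbitrary illustration values):

import numpy as np

d_model, num_heads, head_dim = 6, 2, 4
k_tmp = np.zeros((d_model, num_heads, head_dim), dtype=np.float32)

# Fold the two head axes into one, (6, 2, 4) -> (6, 8), then transpose as `k.T` above.
k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
print(k.T.shape)  # (8, 6)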
from __future__ import annotations
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )-> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 | 1 |
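The three branches above are the mass action law n * p = n_i**2 solved for each unknown in turn. A worked example using a common textbook value for silicon's intrinsic concentration at 300 K:

intrinsic_conc = 1.5e10  # carriers per cm^3, typical textbook value for silicon
electron_conc = 1.0e17   # donor-doped sample

hole_conc = intrinsic_conc**2 / electron_conc
print(hole_conc)  # 2250.0 per cm^3

# Round trip: recover the intrinsic concentration from the two carrier densities.
print((electron_conc * hole_conc) ** 0.5)  # 15000000000.0, i.e. 1.5e10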
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase=1_0_2_4 , __lowercase=1_0_2_4 , __lowercase=3.6 ) -> Any:
lowerCAmelCase_ : str = tokenizer
lowerCAmelCase_ : Optional[int] = tokenizer.bos_token_id
lowerCAmelCase_ : Union[str, Any] = dataset
lowerCAmelCase_ : List[str] = seq_length
lowerCAmelCase_ : int = seq_length * chars_per_token * num_of_sequences
def __iter__( self ) -> List[str]:
lowerCAmelCase_ : List[str] = iter(self.dataset )
lowerCAmelCase_ : List[str] = True
while more_examples:
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(__lowercase )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
lowerCAmelCase_ : str = False
break
lowerCAmelCase_ : Optional[Any] = tokenizer(__lowercase , truncation=__lowercase )['''input_ids''']
lowerCAmelCase_ : int = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(__lowercase ) , self.seq_length ):
lowerCAmelCase_ : int = all_token_ids[i : i + self.seq_length]
if len(__lowercase ) == self.seq_length:
yield torch.tensor(__lowercase )
def lowerCAmelCase ( lowerCAmelCase_ )-> Any:
lowerCAmelCase_ : Dict = {'''streaming''': True}
lowerCAmelCase_ : Dict = load_dataset(args.dataset_name , split='''train''' , **lowerCAmelCase_ )
lowerCAmelCase_ : List[str] = ConstantLengthDataset(lowerCAmelCase_ , lowerCAmelCase_ , seq_length=args.seq_length )
lowerCAmelCase_ : Dict = DataLoader(lowerCAmelCase_ , batch_size=args.batch_size )
return eval_dataloader
def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[Any]:
model.eval()
lowerCAmelCase_ : Any = []
for step, batch in enumerate(lowerCAmelCase_ ):
with torch.no_grad():
lowerCAmelCase_ : Union[str, Any] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
lowerCAmelCase_ : Optional[int] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(lowerCAmelCase_ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
lowerCAmelCase_ : Tuple = torch.mean(torch.cat(lowerCAmelCase_ ) )
try:
lowerCAmelCase_ : Dict = torch.exp(lowerCAmelCase_ )
except OverflowError:
lowerCAmelCase_ : Any = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
_UpperCAmelCase : Union[str, Any] =Accelerator()
# Parse configuration
_UpperCAmelCase : Union[str, Any] =HfArgumentParser(EvaluationArguments)
_UpperCAmelCase : Any =parser.parse_args()
set_seed(args.seed)
# Logging
_UpperCAmelCase : int =logging.getLogger(__name__)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
# Load model and tokenizer
_UpperCAmelCase : Union[str, Any] =AutoModelForCausalLM.from_pretrained(args.model_ckpt)
_UpperCAmelCase : Optional[Any] =AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
_UpperCAmelCase : Any =create_dataloader(args)
# Prepare everything with our `accelerator`.
_UpperCAmelCase , _UpperCAmelCase : List[str] =accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("""Evaluating and saving model after training""")
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] =evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""") | 619 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_UpperCAmelCase : Any ="""src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_UpperCAmelCase : Optional[Any] =direct_transformers_import(PATH_TO_TRANSFORMERS)
_UpperCAmelCase : List[str] =transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_UpperCAmelCase : Dict =re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
_UpperCAmelCase : Any ={
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
lowerCAmelCase_ : Any = None
# source code of `config_class`
lowerCAmelCase_ : Optional[int] = inspect.getsource(lowerCAmelCase_ )
lowerCAmelCase_ : str = _re_checkpoint.findall(lowerCAmelCase_ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
lowerCAmelCase_ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowerCAmelCase_ : Tuple = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
lowerCAmelCase_ : List[str] = ckpt_name
break
return checkpoint
def lowerCAmelCase ( )-> Optional[Any]:
lowerCAmelCase_ : Tuple = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowerCAmelCase_ : int = get_checkpoint_from_config_class(lowerCAmelCase_ )
lowerCAmelCase_ : Tuple = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
lowerCAmelCase_ : List[Any] = '''\n'''.join(sorted(lowerCAmelCase_ ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints() | 619 | 1 |
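The regex above pulls `[name](https://huggingface.co/...)` pairs out of config docstrings. A quick demonstration of the match and of the name/link consistency check that follows it:

import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

docstring = (
    "Instantiating a configuration with the defaults will yield a similar "
    "configuration to that of [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
)
for ckpt_name, ckpt_link in _re_checkpoint.findall(docstring):
    print(ckpt_name, "->", ckpt_link)
    assert ckpt_link.rstrip("/") == f"https://huggingface.co/{ckpt_name}"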
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=1_3 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=9_9 , __lowercase=6_4 , __lowercase=5 , __lowercase=4 , __lowercase=3_7 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_1_2 , __lowercase=1_6 , __lowercase=2 , __lowercase=0.02 , __lowercase=3 , __lowercase=4 , __lowercase=None , ) -> int:
lowerCAmelCase_ : Dict = parent
lowerCAmelCase_ : Optional[Any] = batch_size
lowerCAmelCase_ : str = seq_length
lowerCAmelCase_ : Dict = is_training
lowerCAmelCase_ : List[Any] = use_input_mask
lowerCAmelCase_ : int = use_token_type_ids
lowerCAmelCase_ : List[str] = use_labels
lowerCAmelCase_ : Union[str, Any] = vocab_size
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : Any = num_hidden_layers
lowerCAmelCase_ : int = num_attention_heads
lowerCAmelCase_ : Any = intermediate_size
lowerCAmelCase_ : int = hidden_act
lowerCAmelCase_ : Dict = hidden_dropout_prob
lowerCAmelCase_ : List[Any] = attention_probs_dropout_prob
lowerCAmelCase_ : str = max_position_embeddings
lowerCAmelCase_ : int = type_vocab_size
lowerCAmelCase_ : Union[str, Any] = type_sequence_label_size
lowerCAmelCase_ : Union[str, Any] = initializer_range
lowerCAmelCase_ : str = num_labels
lowerCAmelCase_ : Union[str, Any] = num_choices
lowerCAmelCase_ : Any = scope
lowerCAmelCase_ : List[str] = vocab_size - 1
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : List[Any] = None
if self.use_input_mask:
lowerCAmelCase_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ : Tuple = None
if self.use_labels:
lowerCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def lowercase_ ( self ) -> Optional[int]:
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ : str = True
return config, input_ids, input_mask, token_labels
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> List[str]:
lowerCAmelCase_ : str = GPTNeoXModel(config=__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : List[Any] = model(__lowercase , attention_mask=__lowercase )
lowerCAmelCase_ : Union[str, Any] = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> int:
lowerCAmelCase_ : Optional[int] = True
lowerCAmelCase_ : List[Any] = GPTNeoXModel(__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : int = model(__lowercase , attention_mask=__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> Tuple:
lowerCAmelCase_ : Any = GPTNeoXForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : Dict = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
lowerCAmelCase_ : Optional[int] = self.num_labels
lowerCAmelCase_ : Union[str, Any] = GPTNeoXForQuestionAnswering(__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(__lowercase , attention_mask=__lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]:
lowerCAmelCase_ : Any = self.num_labels
lowerCAmelCase_ : Union[str, Any] = GPTNeoXForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : Any = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> str:
lowerCAmelCase_ : Union[str, Any] = self.num_labels
lowerCAmelCase_ : Union[str, Any] = GPTNeoXForTokenClassification(__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
lowerCAmelCase_ : Any = True
lowerCAmelCase_ : List[Any] = GPTNeoXForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
# first forward pass
lowerCAmelCase_ : Dict = model(__lowercase , attention_mask=__lowercase , use_cache=__lowercase )
lowerCAmelCase_ : List[str] = outputs.past_key_values
# create hypothetical next tokens and extend next_input_ids
lowerCAmelCase_ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
lowerCAmelCase_ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase_ : Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase_ : Dict = model(__lowercase , attention_mask=__lowercase , output_hidden_states=__lowercase )
lowerCAmelCase_ : Any = output_from_no_past['''hidden_states'''][0]
lowerCAmelCase_ : Union[str, Any] = model(
__lowercase , attention_mask=__lowercase , past_key_values=__lowercase , output_hidden_states=__lowercase , )['''hidden_states'''][0]
# select random slice
lowerCAmelCase_ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase_ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase_ : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1e-3 ) )
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = config_and_inputs
lowerCAmelCase_ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : str = (GPTNeoXForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
SCREAMING_SNAKE_CASE__ : List[str] = False
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ : Any = GPTNeoXModelTester(self )
lowerCAmelCase_ : Optional[int] = ConfigTester(self , config_class=__lowercase , hidden_size=6_4 , num_attention_heads=8 )
def lowercase_ ( self ) -> int:
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__lowercase , __lowercase , __lowercase )
def lowercase_ ( self ) -> int:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__lowercase , __lowercase , __lowercase )
def lowercase_ ( self ) -> Dict:
# This regression test was failing with PyTorch < 1.3
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCAmelCase_ : Dict = None
self.model_tester.create_and_check_model_as_decoder(__lowercase , __lowercase , __lowercase )
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__lowercase , __lowercase , __lowercase )
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__lowercase )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowercase )
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowercase )
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowercase )
@unittest.skip(reason='''Feed forward chunking is not implemented''' )
def lowercase_ ( self ) -> Optional[int]:
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def lowercase_ ( self , __lowercase ) -> Tuple:
lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Optional[Any] = ids_tensor([1, 1_0] , config.vocab_size )
lowerCAmelCase_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase_ : int = GPTNeoXModel(__lowercase )
original_model.to(__lowercase )
original_model.eval()
lowerCAmelCase_ : List[Any] = original_model(__lowercase ).last_hidden_state
lowerCAmelCase_ : Tuple = original_model(__lowercase ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase_ : List[str] = {'''type''': scaling_type, '''factor''': 10.0}
lowerCAmelCase_ : Optional[Any] = GPTNeoXModel(__lowercase )
scaled_model.to(__lowercase )
scaled_model.eval()
lowerCAmelCase_ : List[Any] = scaled_model(__lowercase ).last_hidden_state
lowerCAmelCase_ : List[str] = scaled_model(__lowercase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__lowercase , __lowercase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__lowercase , __lowercase , atol=1e-5 ) )
@require_torch
class snake_case__( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
for checkpointing in [True, False]:
lowerCAmelCase_ : Tuple = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(__lowercase )
lowerCAmelCase_ : List[str] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__lowercase )
# The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCAmelCase_ : List[str] = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'''
lowerCAmelCase_ : Optional[Any] = model.generate(**__lowercase , do_sample=__lowercase , max_new_tokens=2_0 )
lowerCAmelCase_ : List[str] = tokenizer.batch_decode(__lowercase )[0]
self.assertEqual(__lowercase , __lowercase ) | 619 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase = True , __lowercase = False ) -> Tuple:
lowerCAmelCase_ : Optional[int] = scheduler
lowerCAmelCase_ : Dict = optimizers if isinstance(__lowercase , (list, tuple) ) else [optimizers]
lowerCAmelCase_ : str = split_batches
lowerCAmelCase_ : Any = step_with_optimizer
lowerCAmelCase_ : Optional[Any] = GradientState()
def lowercase_ ( self , *__lowercase , **__lowercase ) -> Any:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*__lowercase , **__lowercase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed, so take one scheduler step per training step
self.scheduler.step(*__lowercase , **__lowercase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
lowerCAmelCase_ : Optional[Any] = AcceleratorState().num_processes
for _ in range(__lowercase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*__lowercase , **__lowercase )
else:
self.scheduler.step(*__lowercase , **__lowercase )
def lowercase_ ( self ) -> Union[str, Any]:
return self.scheduler.get_last_lr()
def lowercase_ ( self ) -> List[str]:
return self.scheduler.state_dict()
def lowercase_ ( self , __lowercase ) -> int:
self.scheduler.load_state_dict(__lowercase )
def lowercase_ ( self ) -> Tuple:
return self.scheduler.get_lr()
def lowercase_ ( self , *__lowercase , **__lowercase ) -> int:
return self.scheduler.print_lr(*__lowercase , **__lowercase ) | 619 | 1 |
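When batches are not split, the wrapper above advances the scheduler `num_processes` times per real optimizer step, so the schedule moves as if every process's batch had been seen locally. A minimal sketch of that effect with a plain LambdaLR (the schedule function is arbitrary):

import torch
from torch.optim.lr_scheduler import LambdaLR

params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.SGD(params, lr=1.0)
scheduler = LambdaLR(optimizer, lambda step: 1.0 / (1 + step))

num_processes = 4  # stand-in for AcceleratorState().num_processes
optimizer.step()
for _ in range(num_processes):  # mirrors the non-split-batches branch above
    scheduler.step()
print(scheduler.get_last_lr())  # [0.2], as if 4 local steps had elapsed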
import os
import sys
import unittest
_UpperCAmelCase : int =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_UpperCAmelCase : Union[str, Any] =os.path.join(git_repo_path, """src""", """diffusers""")
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : Any = find_backend(''' if not is_torch_available():''' )
self.assertEqual(__lowercase , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
lowerCAmelCase_ : List[str] = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(__lowercase , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
lowerCAmelCase_ : Dict = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(__lowercase , '''torch_and_transformers_and_onnx''' )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Union[str, Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('''torch''' , __lowercase )
self.assertIn('''torch_and_transformers''' , __lowercase )
self.assertIn('''flax_and_transformers''' , __lowercase )
self.assertIn('''torch_and_transformers_and_onnx''' , __lowercase )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : Dict = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(__lowercase , '''\nCONSTANT = None\n''' )
lowerCAmelCase_ : Optional[int] = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
__lowercase , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
lowerCAmelCase_ : List[Any] = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
lowerCAmelCase_ : List[Any] = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(__lowercase , __lowercase )
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ : Any = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
lowerCAmelCase_ : Dict = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , __lowercase ) | 619 |
from manim import *
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
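# Manim scene sketching how checkpoint weights are staged across CPU, GPU and disk
# (np.memmap offload) while a model is loaded with low memory usage.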
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 )
lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowercase )
lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 )
lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
gpu.move_to([-1, -1, 0] )
self.add(__lowercase )
lowerCAmelCase_ : str = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 )
lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
model.move_to([3, -1.0, 0] )
self.add(__lowercase )
lowerCAmelCase_ : int = []
lowerCAmelCase_ : int = []
lowerCAmelCase_ : Dict = []
for i, rect in enumerate(__lowercase ):
rect.set_stroke(__lowercase )
lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
self.add(__lowercase )
model_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase , *__lowercase )
lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 )
lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__lowercase )
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Dict = []
for i, rect in enumerate(__lowercase ):
lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 )
target.move_to(__lowercase )
ckpt_arr.append(__lowercase )
lowerCAmelCase_ : Union[str, Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase )
lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase_ : str = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowercase , __lowercase )
lowerCAmelCase_ : str = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowercase )
lowerCAmelCase_ : str = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 )
lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
lowerCAmelCase_ : int = []
for i, rect in enumerate(__lowercase ):
lowerCAmelCase_ : int = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
self.play(*__lowercase )
self.play(FadeOut(__lowercase ) )
lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase , run_time=3 ) )
self.play(
FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
self.wait() | 619 | 1 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
_UpperCAmelCase : List[Any] =False
_UpperCAmelCase : Union[str, Any] =True
_UpperCAmelCase : List[Any] =False
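# One-off migration script for old diffusers UNet repos: depending on the flags above it
# rewrites config keys (config_parameters_to_change), renames state-dict prefixes
# (key_parameters_to_change), or simply re-saves the model in the current format.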
if __name__ == "__main__":
_UpperCAmelCase : List[str] =argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
_UpperCAmelCase : Dict =parser.parse_args()
_UpperCAmelCase : List[str] ={
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
_UpperCAmelCase : Dict ={
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
_UpperCAmelCase : Optional[int] ="""""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
_UpperCAmelCase : Optional[int] =reader.read()
_UpperCAmelCase : List[Any] =json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
_UpperCAmelCase : Any =UNetaDModel(**config)
else:
_UpperCAmelCase : Tuple =UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
_UpperCAmelCase : Optional[Any] =class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
_UpperCAmelCase : Tuple =dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
_UpperCAmelCase : Tuple =config[key]
del config[key]
_UpperCAmelCase : str =[k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
_UpperCAmelCase : str =[k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
if do_only_weights:
_UpperCAmelCase : List[str] =torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
_UpperCAmelCase : Tuple ={}
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
_UpperCAmelCase : Dict =False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
_UpperCAmelCase : Dict =param_value
_UpperCAmelCase : List[str] =True
if not has_changed:
_UpperCAmelCase : List[Any] =param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder)) | 619 |
_UpperCAmelCase : Dict =[
(1000, """M"""),
(900, """CM"""),
(500, """D"""),
(400, """CD"""),
(100, """C"""),
(90, """XC"""),
(50, """L"""),
(40, """XL"""),
(10, """X"""),
(9, """IX"""),
(5, """V"""),
(4, """IV"""),
(1, """I"""),
]
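# The value/symbol pairs above are ordered from largest to smallest so conversion can proceed greedily.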
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
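# Roman -> int: scan left to right; when a symbol is smaller than its successor
# (the I in IV), treat the pair as a subtraction, e.g. "MCMXCIV" -> 1994.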
lowerCAmelCase_ : Any = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000}
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : List[str] = 0
while place < len(lowerCAmelCase_ ):
if (place + 1 < len(lowerCAmelCase_ )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
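# Int -> Roman: greedily emit the largest symbol that still fits, e.g. 1994 -> "MCMXCIV".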
lowerCAmelCase_ : List[Any] = []
for arabic, roman in ROMAN:
(lowerCAmelCase_), (lowerCAmelCase_) = divmod(lowerCAmelCase_ , lowerCAmelCase_ ) # annotating a tuple target is a SyntaxError, so the annotation is dropped
result.append(roman * factor )
if number == 0:
break
return "".join(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 | 1 |
from ..utils import DummyObject, requires_backends
class snake_case__( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""speech"""]
def __init__( self , *__lowercase , **__lowercase ) -> Dict:
requires_backends(self , ['''speech'''] )
class snake_case__( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = ["""speech"""]
def __init__( self , *__lowercase , **__lowercase ) -> Optional[int]:
requires_backends(self , ['''speech'''] ) | 619 |
import csv
import tweepy
# Twitter API credentials
_UpperCAmelCase : int =""""""
_UpperCAmelCase : Optional[int] =""""""
_UpperCAmelCase : Dict =""""""
_UpperCAmelCase : str =""""""
def lowerCAmelCase ( lowerCAmelCase_ )-> None:
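# Downloads a user's timeline page by page (the API returns at most 200 tweets per
# request), walking backwards with max_id, then writes id/created_at/text to a CSV.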
# authorize twitter, initialize tweepy
lowerCAmelCase_ : Optional[int] = tweepy.OAuthHandler(lowerCAmelCase_ , lowerCAmelCase_ )
auth.set_access_token(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : Any = tweepy.API(lowerCAmelCase_ )
# initialize a list to hold all the tweepy Tweets
lowerCAmelCase_ : Dict = []
# make initial request for most recent tweets (200 is the maximum allowed count)
lowerCAmelCase_ : Optional[int] = api.user_timeline(screen_name=lowerCAmelCase_ , count=200 )
# save most recent tweets
alltweets.extend(lowerCAmelCase_ )
# save the id of the oldest tweet less one
lowerCAmelCase_ : str = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase_ ) > 0:
print(f"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
lowerCAmelCase_ : Optional[Any] = api.user_timeline(
screen_name=lowerCAmelCase_ , count=200 , max_id=lowerCAmelCase_ )
# save most recent tweets
alltweets.extend(lowerCAmelCase_ )
# update the id of the oldest tweet less one
lowerCAmelCase_ : Optional[Any] = alltweets[-1].id - 1
print(f"""...{len(lowerCAmelCase_ )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
lowerCAmelCase_ : Union[str, Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f"""new_{screen_name}_tweets.csv""" , '''w''' ) as f:
lowerCAmelCase_ : Optional[int] = csv.writer(lowerCAmelCase_ )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(lowerCAmelCase_ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""") | 619 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
_UpperCAmelCase : int =["""gpt2"""]
_UpperCAmelCase : Dict ="""gpt2"""
if is_tf_available():
class snake_case__( tf.Module ):
'''simple docstring'''
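# tf.Module bundling the in-graph GPT-2 tokenizer with a TF GPT-2 LM head model so the
# whole tokenize-then-generate pipeline can be exported as a single SavedModel.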
def __init__( self , __lowercase ) -> List[Any]:
super().__init__()
lowerCAmelCase_ : int = tokenizer
lowerCAmelCase_ : Optional[Any] = AutoConfig.from_pretrained(__lowercase )
lowerCAmelCase_ : Optional[int] = TFGPTaLMHeadModel.from_config(__lowercase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) )
def lowercase_ ( self , __lowercase ) -> List[Any]:
lowerCAmelCase_ : List[str] = self.tokenizer(__lowercase )
lowerCAmelCase_ : Optional[Any] = tokenized['''input_ids'''].to_tensor()
lowerCAmelCase_ : List[Any] = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
lowerCAmelCase_ : List[Any] = self.model(input_ids=__lowercase , attention_mask=__lowercase )['''logits''']
return outputs
@require_tf
@require_keras_nlp
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> Tuple:
super().setUp()
lowerCAmelCase_ : List[Any] = [GPTaTokenizer.from_pretrained(__lowercase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
lowerCAmelCase_ : Dict = [TFGPTaTokenizer.from_pretrained(__lowercase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
lowerCAmelCase_ : int = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
lowerCAmelCase_ : Any = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def lowercase_ ( self ) -> str:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
lowerCAmelCase_ : int = tokenizer([test_inputs] , return_tensors='''tf''' )
lowerCAmelCase_ : str = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
lowerCAmelCase_ : List[str] = python_outputs[key].numpy()
lowerCAmelCase_ : int = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(__lowercase , tf.intaa ) == tf_outputs_values ) )
@slow
def lowercase_ ( self ) -> Tuple:
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase_ : int = tf.function(__lowercase )
for test_inputs in self.test_sentences:
lowerCAmelCase_ : int = tf.constant(__lowercase )
lowerCAmelCase_ : Any = compiled_tokenizer(__lowercase )
lowerCAmelCase_ : Tuple = tf_tokenizer(__lowercase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def lowercase_ ( self ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase_ : int = ModelToSave(tokenizer=__lowercase )
lowerCAmelCase_ : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
lowerCAmelCase_ : Optional[int] = model.serving(__lowercase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowerCAmelCase_ : str = Path(__lowercase ) / '''saved.model'''
tf.saved_model.save(__lowercase , __lowercase , signatures={'''serving_default''': model.serving} )
lowerCAmelCase_ : Optional[int] = tf.saved_model.load(__lowercase )
lowerCAmelCase_ : Dict = loaded_model.signatures['''serving_default'''](__lowercase )['''output_0''']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def lowercase_ ( self ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase_ : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
lowerCAmelCase_ : Any = tf_tokenizer(__lowercase ) # Build model with some sample inputs
lowerCAmelCase_ : Dict = tf_tokenizer.get_config()
lowerCAmelCase_ : int = TFGPTaTokenizer.from_config(__lowercase )
lowerCAmelCase_ : List[Any] = model_from_config(__lowercase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def lowercase_ ( self ) -> Union[str, Any]:
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
lowerCAmelCase_ : Optional[int] = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
lowerCAmelCase_ : Union[str, Any] = tf.convert_to_tensor([self.test_sentences[0]] )
lowerCAmelCase_ : List[str] = tf_tokenizer(__lowercase , max_length=__lowercase )
lowerCAmelCase_ : Union[str, Any] = out['''input_ids'''].numpy().shape[1]
assert out_length == max_length | 619 |
from math import sqrt
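# Grab bag of number-theory helpers: trial-division primality test, sieve of
# Eratosthenes, prime factorization, gcd/lcm, Goldbach pairs, divisors, factorial
# and Fibonacci.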
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
lowerCAmelCase_ : str = True
# 0 and 1 are none primes.
if number <= 1:
lowerCAmelCase_ : List[Any] = False
for divisor in range(2 , int(round(sqrt(lowerCAmelCase_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowerCAmelCase_ : Any = False
break
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'status' must been from type bool"
return status
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2"
# begin_list contains all natural numbers from 2 up to N
lowerCAmelCase_ : Optional[Any] = list(range(2 , n + 1 ) )
lowerCAmelCase_ : List[Any] = [] # this list will be returned.
# actual sieve of Eratosthenes
for i in range(len(lowerCAmelCase_ ) ):
for j in range(i + 1 , len(lowerCAmelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowerCAmelCase_ : Tuple = 0
# filters actual prime numbers.
lowerCAmelCase_ : List[str] = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2"
lowerCAmelCase_ : List[str] = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCAmelCase_ ):
ans.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[Any]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and number >= 0, "'number' must been an int and >= 0"
lowerCAmelCase_ : Union[str, Any] = [] # this list will be returned by the function.
# potential prime number factors.
lowerCAmelCase_ : Any = 2
lowerCAmelCase_ : List[str] = number
if number == 0 or number == 1:
ans.append(lowerCAmelCase_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCAmelCase_ ):
while quotient != 1:
if is_prime(lowerCAmelCase_ ) and (quotient % factor == 0):
ans.append(lowerCAmelCase_ )
quotient //= factor # floor division keeps 'quotient' an integer
else:
factor += 1
else:
ans.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase_ : Union[str, Any] = 0
# prime factorization of 'number'
lowerCAmelCase_ : Optional[int] = prime_factorization(lowerCAmelCase_ )
lowerCAmelCase_ : Dict = max(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type int"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase_ : Union[str, Any] = 0
# prime factorization of 'number'
lowerCAmelCase_ : int = prime_factorization(lowerCAmelCase_ )
lowerCAmelCase_ : Union[str, Any] = min(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type int"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Dict:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowerCAmelCase_ ), "compare bust been from type bool"
return number % 2 == 0
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowerCAmelCase_ ), "compare bust been from type bool"
return number % 2 != 0
def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]:
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (number > 2) and is_even(lowerCAmelCase_ )
), "'number' must been an int, even and > 2"
lowerCAmelCase_ : Union[str, Any] = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
lowerCAmelCase_ : List[Any] = get_prime_numbers(lowerCAmelCase_ )
lowerCAmelCase_ : Any = len(lowerCAmelCase_ )
# run variable for while-loops.
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : List[Any] = None
# exit variable, used to break out of the loops
lowerCAmelCase_ : int = True
while i < len_pn and loop:
lowerCAmelCase_ : Tuple = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowerCAmelCase_ : Union[str, Any] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (len(lowerCAmelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]:
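# Greatest common divisor via the Euclidean algorithm: repeatedly replace (a, b)
# with (b, a mod b) until the second value is 0.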
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase_ : List[str] = 0
while numbera != 0:
lowerCAmelCase_ : int = numbera % numbera
lowerCAmelCase_ : Union[str, Any] = numbera
lowerCAmelCase_ : Tuple = rest
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any:
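# Least common multiple built from the prime factorizations: for every prime, take
# the larger exponent occurring in either number.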
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase_ : Dict = 1 # actual answer that will be returned.
# for lcm(x, 1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowerCAmelCase_ : Tuple = prime_factorization(lowerCAmelCase_ )
lowerCAmelCase_ : str = prime_factorization(lowerCAmelCase_ )
elif numbera == 1 or numbera == 1:
lowerCAmelCase_ : List[Any] = []
lowerCAmelCase_ : List[Any] = []
lowerCAmelCase_ : str = max(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : str = 0
lowerCAmelCase_ : List[str] = 0
lowerCAmelCase_ : Any = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowerCAmelCase_ : str = prime_fac_a.count(lowerCAmelCase_ )
lowerCAmelCase_ : List[str] = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(max(lowerCAmelCase_ , lowerCAmelCase_ ) ):
ans *= n
else:
lowerCAmelCase_ : Dict = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ ):
ans *= n
done.append(lowerCAmelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowerCAmelCase_ : Optional[int] = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ ):
ans *= n
done.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Dict:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'number' must been a positive int"
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : Optional[int] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans is not prime then
# advance to the next prime number.
while not is_prime(lowerCAmelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and is_prime(
lowerCAmelCase_ ), "'ans' must been a prime number and from type int"
return ans
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Dict:
assert (
is_prime(lowerCAmelCase_ ) and is_prime(lowerCAmelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowerCAmelCase_ : str = p_number_a + 1 # jump to the next number
lowerCAmelCase_ : Dict = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCAmelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCAmelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCAmelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCAmelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 1), "'n' must been int and >= 1"
lowerCAmelCase_ : List[str] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCAmelCase_ )
# precondition
assert ans[0] == 1 and ans[len(lowerCAmelCase_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
lowerCAmelCase_ : Tuple = get_divisors(lowerCAmelCase_ )
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCAmelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# sum all divisors of 'number' except 'number' itself, hence [:-1]
return sum(divisors[:-1] ) == number
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any:
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowerCAmelCase_ : List[str] = gcd(abs(lowerCAmelCase_ ) , abs(lowerCAmelCase_ ) )
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
lowerCAmelCase_ : Tuple = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Tuple:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
lowerCAmelCase_ : Tuple = 0
lowerCAmelCase_ : Optional[Any] = 1
lowerCAmelCase_ : Tuple = 1 # this will be returned
for _ in range(n - 1 ):
lowerCAmelCase_ : Any = ans
ans += fiba
lowerCAmelCase_ : Dict = tmp
return ans | 619 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : int =logging.get_logger(__name__)
_UpperCAmelCase : Dict ={"""vocab_file""": """spiece.model"""}
_UpperCAmelCase : Union[str, Any] ={
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
_UpperCAmelCase : Any ={
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
_UpperCAmelCase : str ="""▁"""
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
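# Slow, sentencepiece-based ALBERT tokenizer: preprocess_text normalizes whitespace,
# quotes, accents and case before the text is piece-tokenized.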
SCREAMING_SNAKE_CASE__ : List[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __lowercase , __lowercase=True , __lowercase=True , __lowercase=False , __lowercase="[CLS]" , __lowercase="[SEP]" , __lowercase="<unk>" , __lowercase="[SEP]" , __lowercase="<pad>" , __lowercase="[CLS]" , __lowercase="[MASK]" , __lowercase = None , **__lowercase , ) -> None:
# The mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text; there should be a match in a non-normalized sentence.
lowerCAmelCase_ : Tuple = (
AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase , normalized=__lowercase )
if isinstance(__lowercase , __lowercase )
else mask_token
)
lowerCAmelCase_ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , )
lowerCAmelCase_ : Tuple = do_lower_case
lowerCAmelCase_ : List[Any] = remove_space
lowerCAmelCase_ : Optional[int] = keep_accents
lowerCAmelCase_ : Any = vocab_file
lowerCAmelCase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowercase )
@property
def lowercase_ ( self ) -> List[str]:
return len(self.sp_model )
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : List[Any] = {self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Any:
lowerCAmelCase_ : Any = self.__dict__.copy()
lowerCAmelCase_ : str = None
return state
def __setstate__( self , __lowercase ) -> Optional[int]:
lowerCAmelCase_ : List[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase_ : int = {}
lowerCAmelCase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self , __lowercase ) -> int:
if self.remove_space:
lowerCAmelCase_ : Union[str, Any] = ''' '''.join(inputs.strip().split() )
else:
lowerCAmelCase_ : Optional[Any] = inputs
lowerCAmelCase_ : int = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
lowerCAmelCase_ : int = unicodedata.normalize('''NFKD''' , __lowercase )
lowerCAmelCase_ : str = ''''''.join([c for c in outputs if not unicodedata.combining(__lowercase )] )
if self.do_lower_case:
lowerCAmelCase_ : int = outputs.lower()
return outputs
def lowercase_ ( self , __lowercase ) -> List[str]:
lowerCAmelCase_ : Optional[Any] = self.preprocess_text(__lowercase )
lowerCAmelCase_ : Optional[int] = self.sp_model.encode(__lowercase , out_type=__lowercase )
lowerCAmelCase_ : List[str] = []
for piece in pieces:
if len(__lowercase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
lowerCAmelCase_ : Any = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowercase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase_ : List[str] = cur_pieces[1:]
else:
lowerCAmelCase_ : str = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowercase )
else:
new_pieces.append(__lowercase )
return new_pieces
def lowercase_ ( self , __lowercase ) -> Optional[Any]:
return self.sp_model.PieceToId(__lowercase )
def lowercase_ ( self , __lowercase ) -> Optional[int]:
return self.sp_model.IdToPiece(__lowercase )
def lowercase_ ( self , __lowercase ) -> Dict:
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : Any = ''''''
lowerCAmelCase_ : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowercase ) + token
lowerCAmelCase_ : List[str] = True
lowerCAmelCase_ : List[Any] = []
else:
current_sub_tokens.append(__lowercase )
lowerCAmelCase_ : List[str] = False
out_string += self.sp_model.decode(__lowercase )
return out_string.strip()
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : str = [self.sep_token_id]
lowerCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowercase )) + [1] + ([0] * len(__lowercase )) + [1]
return [1] + ([0] * len(__lowercase )) + [1]
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : List[Any] = [self.sep_token_id]
lowerCAmelCase_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
if not os.path.isdir(__lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase_ : Dict = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowercase , '''wb''' ) as fi:
lowerCAmelCase_ : Tuple = self.sp_model.serialized_model_proto()
fi.write(__lowercase )
return (out_vocab_file,) | 619 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_UpperCAmelCase : Tuple =10
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int:
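# Plain linear scan over array[left:right]; used once the window is below `precision`.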
for i in range(lowerCAmelCase_ , lowerCAmelCase_ ):
if array[i] == target:
return i
return -1
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> int:
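# Iterative ternary search on a sorted array: pick two pivots that split the window
# into thirds and keep the third that can still contain the target.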
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : int = len(lowerCAmelCase_ )
while left <= right:
if right - left < precision:
return lin_search(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : List[Any] = (left + right) // 3 + 1
lowerCAmelCase_ : List[str] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
lowerCAmelCase_ : Dict = one_third - 1
elif array[two_third] < target:
lowerCAmelCase_ : List[Any] = two_third + 1
else:
lowerCAmelCase_ : Union[str, Any] = one_third + 1
lowerCAmelCase_ : Tuple = two_third - 1
else:
return -1
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int:
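# Recursive variant of the same ternary search; both variants fall back to lin_search
# for small windows and return -1 when the target is absent.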
if left < right:
if right - left < precision:
return lin_search(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : Union[str, Any] = (left + right) // 3 + 1
lowerCAmelCase_ : Optional[int] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(lowerCAmelCase_ , one_third - 1 , lowerCAmelCase_ , lowerCAmelCase_ )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , lowerCAmelCase_ , lowerCAmelCase_ )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase : Tuple =input("""Enter numbers separated by comma:\n""").strip()
_UpperCAmelCase : Union[str, Any] =[int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
_UpperCAmelCase : int =int(input("""Enter the number to be found in the list:\n""").strip())
_UpperCAmelCase : Optional[Any] =ite_ternary_search(collection, target)
_UpperCAmelCase : List[str] =rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f"""Iterative search: {target} found at positions: {resulta}""")
print(f"""Recursive search: {target} found at positions: {resulta}""")
else:
print("""Not found""") | 619 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[int] =logging.get_logger(__name__)
_UpperCAmelCase : Dict ={
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """mvp"""
SCREAMING_SNAKE_CASE__ : List[str] = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , __lowercase=5_0_2_6_7 , __lowercase=1_0_2_4 , __lowercase=1_2 , __lowercase=4_0_9_6 , __lowercase=1_6 , __lowercase=1_2 , __lowercase=4_0_9_6 , __lowercase=1_6 , __lowercase=0.0 , __lowercase=0.0 , __lowercase="gelu" , __lowercase=1_0_2_4 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=0.0 , __lowercase=False , __lowercase=True , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase=True , __lowercase=2 , __lowercase=2 , __lowercase=False , __lowercase=1_0_0 , __lowercase=8_0_0 , **__lowercase , ) -> Tuple:
lowerCAmelCase_ : Any = vocab_size
lowerCAmelCase_ : int = max_position_embeddings
lowerCAmelCase_ : Dict = d_model
lowerCAmelCase_ : int = encoder_ffn_dim
lowerCAmelCase_ : Optional[Any] = encoder_layers
lowerCAmelCase_ : List[Any] = encoder_attention_heads
lowerCAmelCase_ : Union[str, Any] = decoder_ffn_dim
lowerCAmelCase_ : Optional[Any] = decoder_layers
lowerCAmelCase_ : str = decoder_attention_heads
lowerCAmelCase_ : Union[str, Any] = dropout
lowerCAmelCase_ : int = attention_dropout
lowerCAmelCase_ : List[str] = activation_dropout
lowerCAmelCase_ : Any = activation_function
lowerCAmelCase_ : Optional[int] = init_std
lowerCAmelCase_ : Union[str, Any] = encoder_layerdrop
lowerCAmelCase_ : int = decoder_layerdrop
lowerCAmelCase_ : Any = classifier_dropout
lowerCAmelCase_ : int = use_cache
lowerCAmelCase_ : Union[str, Any] = encoder_layers
lowerCAmelCase_ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase_ : Dict = use_prompt
lowerCAmelCase_ : Tuple = prompt_length
lowerCAmelCase_ : str = prompt_mid_dim
super().__init__(
pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , is_encoder_decoder=__lowercase , decoder_start_token_id=__lowercase , forced_eos_token_id=__lowercase , **__lowercase , )
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __lowercase ):
lowerCAmelCase_ : int = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
'''The config can simply be saved and uploaded again to be fixed.''' ) | 619 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
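# Lazy import layout: submodules are registered in _import_structure and only imported
# on first attribute access; optional backends (sentencepiece, tokenizers, torch) are
# skipped when unavailable.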
_UpperCAmelCase : Union[str, Any] ={
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[Any] =["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] =["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple =[
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
_UpperCAmelCase : List[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 619 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def lowercase_ ( __lowercase ) -> Optional[int]:
raise NotImplementedError()
@abstractmethod
def lowercase_ ( self ) -> int:
raise NotImplementedError() | 619 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_UpperCAmelCase : Any =False
class snake_case__( unittest.TestCase ):
'''simple docstring'''
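# Fast VQDiffusion pipeline tests built from tiny VQModel/CLIP/Transformer2D components;
# a 3x3 corner slice of the generated image is compared against hard-coded references.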
def lowercase_ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase_ ( self ) -> Union[str, Any]:
return 1_2
@property
def lowercase_ ( self ) -> Any:
return 1_2
@property
def lowercase_ ( self ) -> Optional[Any]:
return 3_2
@property
def lowercase_ ( self ) -> int:
torch.manual_seed(0 )
lowerCAmelCase_ : Any = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def lowercase_ ( self ) -> int:
torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(__lowercase )
@property
def lowercase_ ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = 1_2
lowerCAmelCase_ : int = 1_2
lowerCAmelCase_ : Union[str, Any] = {
'''attention_bias''': True,
'''cross_attention_dim''': 3_2,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 3_2,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
lowerCAmelCase_ : List[str] = TransformeraDModel(**__lowercase )
return model
def lowercase_ ( self ) -> str:
lowerCAmelCase_ : List[Any] = '''cpu'''
lowerCAmelCase_ : Any = self.dummy_vqvae
lowerCAmelCase_ : str = self.dummy_text_encoder
lowerCAmelCase_ : Union[str, Any] = self.dummy_tokenizer
lowerCAmelCase_ : int = self.dummy_transformer
lowerCAmelCase_ : List[str] = VQDiffusionScheduler(self.num_embed )
lowerCAmelCase_ : Union[str, Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=__lowercase )
lowerCAmelCase_ : Dict = VQDiffusionPipeline(
vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
lowerCAmelCase_ : int = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
lowerCAmelCase_ : Any = '''teddy bear playing in the pool'''
lowerCAmelCase_ : int = torch.Generator(device=__lowercase ).manual_seed(0 )
lowerCAmelCase_ : Tuple = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' )
lowerCAmelCase_ : Union[str, Any] = output.images
lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
lowerCAmelCase_ : List[Any] = pipe(
[prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0]
lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
lowerCAmelCase_ : Optional[int] = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : Optional[Any] = '''cpu'''
lowerCAmelCase_ : str = self.dummy_vqvae
lowerCAmelCase_ : Dict = self.dummy_text_encoder
lowerCAmelCase_ : List[Any] = self.dummy_tokenizer
lowerCAmelCase_ : Union[str, Any] = self.dummy_transformer
lowerCAmelCase_ : Tuple = VQDiffusionScheduler(self.num_embed )
lowerCAmelCase_ : str = LearnedClassifierFreeSamplingEmbeddings(
learnable=__lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
lowerCAmelCase_ : List[str] = VQDiffusionPipeline(
vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
lowerCAmelCase_ : Union[str, Any] = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
lowerCAmelCase_ : Any = '''teddy bear playing in the pool'''
lowerCAmelCase_ : List[str] = torch.Generator(device=__lowercase ).manual_seed(0 )
lowerCAmelCase_ : Dict = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' )
lowerCAmelCase_ : str = output.images
lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = pipe(
[prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0]
lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
lowerCAmelCase_ : Union[str, Any] = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
lowerCAmelCase_ : str = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
lowerCAmelCase_ : List[Any] = pipeline.to(__lowercase )
pipeline.set_progress_bar_config(disable=__lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
lowerCAmelCase_ : Optional[int] = pipeline(
'''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=__lowercase , output_type='''np''' , )
lowerCAmelCase_ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0 | 619 | 1 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_UpperCAmelCase : Any =logging.getLogger(__name__)
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
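# CoNLL-style NER task: groups whitespace-separated token/label lines into one
# InputExample per sentence block, with -DOCSTART- and blank lines as separators.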
def __init__( self , __lowercase=-1 ) -> Dict:
# in NER datasets, the last column is usually reserved for the NER label
lowerCAmelCase_ : int = label_idx
def lowercase_ ( self , __lowercase , __lowercase ) -> List[InputExample]:
if isinstance(__lowercase , __lowercase ):
lowerCAmelCase_ : Optional[int] = mode.value
lowerCAmelCase_ : Optional[int] = os.path.join(__lowercase , f"""{mode}.txt""" )
lowerCAmelCase_ : Optional[int] = 1
lowerCAmelCase_ : Union[str, Any] = []
with open(__lowercase , encoding='''utf-8''' ) as f:
lowerCAmelCase_ : int = []
lowerCAmelCase_ : Optional[Any] = []
for line in f:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__lowercase , labels=__lowercase ) )
guid_index += 1
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Optional[Any] = []
else:
lowerCAmelCase_ : int = line.split(''' ''' )
words.append(splits[0] )
if len(__lowercase ) > 1:
labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) )
else:
# Examples could have no label for mode = "test"
labels.append('''O''' )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__lowercase , labels=__lowercase ) )
return examples
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> List[str]:
lowerCAmelCase_ : Optional[Any] = 0
for line in test_input_reader:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
writer.write(__lowercase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowerCAmelCase_ : Optional[Any] = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
writer.write(__lowercase )
else:
logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] )
def lowercase_ ( self , __lowercase ) -> List[str]:
if path:
with open(__lowercase , '''r''' ) as f:
lowerCAmelCase_ : Dict = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase_ : List[str] = ['''O'''] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self ) -> List[Any]:
# in the CoNLL-2003 dataset, the chunk column is second-to-last
super().__init__(label_idx=-2 )
def lowercase_ ( self , __lowercase ) -> List[str]:
if path:
with open(__lowercase , '''r''' ) as f:
lowerCAmelCase_ : Dict = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase_ : Optional[int] = ['''O'''] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
def lowercase_ ( self , __lowercase , __lowercase ) -> List[InputExample]:
if isinstance(__lowercase , __lowercase ):
lowerCAmelCase_ : Dict = mode.value
lowerCAmelCase_ : str = os.path.join(__lowercase , f"""{mode}.txt""" )
lowerCAmelCase_ : List[str] = 1
lowerCAmelCase_ : List[str] = []
with open(__lowercase , encoding='''utf-8''' ) as f:
for sentence in parse_incr(__lowercase ):
lowerCAmelCase_ : List[Any] = []
lowerCAmelCase_ : Dict = []
for token in sentence:
words.append(token['''form'''] )
labels.append(token['''upos'''] )
assert len(__lowercase ) == len(__lowercase )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__lowercase , labels=__lowercase ) )
guid_index += 1
return examples
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
lowerCAmelCase_ : Dict = 0
for sentence in parse_incr(__lowercase ):
lowerCAmelCase_ : Union[str, Any] = preds_list[example_id]
lowerCAmelCase_ : Optional[Any] = ''''''
for token in sentence:
out += f"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """
out += "\n"
writer.write(__lowercase )
example_id += 1
def lowercase_ ( self , __lowercase ) -> List[str]:
if path:
with open(__lowercase , '''r''' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
] | 619 |
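# A data-format sketch for the readers above (rows are illustrative):
# CoNLL-style files hold one token per line, e.g.
#   EU B-ORG
#   rejects O
# with blank lines or -DOCSTART- markers separating sentences. label_idx picks
# the label column (-1 for NER, -2 for the CoNLL-2003 chunk column), while the
# POS reader instead consumes CoNLL-U sentences via parse_incr.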
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
_UpperCAmelCase : Dict =None
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
_UpperCAmelCase : Any ={"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Any ={
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
_UpperCAmelCase : Dict ={
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
_UpperCAmelCase : Tuple ="""▁"""
# Segments (not really needed)
_UpperCAmelCase : str =0
_UpperCAmelCase : List[str] =1
_UpperCAmelCase : int =2
_UpperCAmelCase : Any =3
_UpperCAmelCase : List[Any] =4
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Any = """left"""
SCREAMING_SNAKE_CASE__ : List[Any] = XLNetTokenizer
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<sep>" , __lowercase="<pad>" , __lowercase="<cls>" , __lowercase="<mask>" , __lowercase=["<eop>", "<eod>"] , **__lowercase , ) -> List[Any]:
        # The mask token behaves like a normal word, i.e. it includes the space before it
lowerCAmelCase_ : Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
super().__init__(
vocab_file=__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , **__lowercase , )
lowerCAmelCase_ : List[Any] = 3
lowerCAmelCase_ : Dict = do_lower_case
lowerCAmelCase_ : Dict = remove_space
lowerCAmelCase_ : List[str] = keep_accents
lowerCAmelCase_ : List[str] = vocab_file
lowerCAmelCase_ : str = False if not self.vocab_file else True
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : Tuple = [self.sep_token_id]
lowerCAmelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
lowerCAmelCase_ : List[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase_ : str = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
copyfile(self.vocab_file , __lowercase )
return (out_vocab_file,) | 619 | 1 |
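# A usage sketch, assuming this mirrors transformers' XLNetTokenizerFast
# (the call below is illustrative, not part of the snippet):
# tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
# tok("Hello", "world")  # pairs encode as A </s> B </s> <cls>, with
# token_type_ids 0..0 1..1 2; the trailing CLS gets segment id 2, not 0.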
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None
SCREAMING_SNAKE_CASE__ : torch.FloatTensor = None
SCREAMING_SNAKE_CASE__ : Optional[Tuple[torch.FloatTensor]] = None
SCREAMING_SNAKE_CASE__ : Optional[Tuple[torch.FloatTensor]] = None
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase=5_1_2 , __lowercase="cls" , __lowercase=False , __lowercase=True , **__lowercase , ) -> List[Any]:
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
lowerCAmelCase_ : Any = project_dim
lowerCAmelCase_ : Union[str, Any] = pooler_fn
lowerCAmelCase_ : List[str] = learn_encoder
lowerCAmelCase_ : int = use_attention_mask
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [r"""pooler""", r"""logit_scale"""]
SCREAMING_SNAKE_CASE__ : Any = [r"""position_ids""", r"""predictions.decoder.bias"""]
SCREAMING_SNAKE_CASE__ : Any = """roberta"""
SCREAMING_SNAKE_CASE__ : str = RobertaSeriesConfig
def __init__( self , __lowercase ) -> Tuple:
super().__init__(__lowercase )
lowerCAmelCase_ : Dict = XLMRobertaModel(__lowercase )
lowerCAmelCase_ : Optional[int] = nn.Linear(config.hidden_size , config.project_dim )
lowerCAmelCase_ : Optional[Any] = getattr(__lowercase , '''has_pre_transformation''' , __lowercase )
if self.has_pre_transformation:
lowerCAmelCase_ : int = nn.Linear(config.hidden_size , config.project_dim )
lowerCAmelCase_ : Optional[int] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def lowercase_ ( self , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , ) -> int:
lowerCAmelCase_ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : int = self.base_model(
input_ids=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , position_ids=__lowercase , head_mask=__lowercase , inputs_embeds=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , output_attentions=__lowercase , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__lowercase , )
if self.has_pre_transformation:
lowerCAmelCase_ : Dict = outputs['''hidden_states'''][-2]
lowerCAmelCase_ : Tuple = self.pre_LN(__lowercase )
lowerCAmelCase_ : int = self.transformation_pre(__lowercase )
return TransformationModelOutput(
projection_state=__lowercase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
lowerCAmelCase_ : Optional[int] = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=__lowercase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) | 619 |
import math
import qiskit
def lowerCAmelCase ( lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 )-> qiskit.result.counts.Counts:
if (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
or isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
or isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(lowerCAmelCase_ ) != input_a)
or (math.floor(lowerCAmelCase_ ) != input_a)
or (math.floor(lowerCAmelCase_ ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
lowerCAmelCase_ : str = qiskit.QuantumRegister(4 , '''qr''' )
lowerCAmelCase_ : str = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
lowerCAmelCase_ : Any = [input_a, input_a, carry_in]
lowerCAmelCase_ : int = qiskit.QuantumCircuit(lowerCAmelCase_ , lowerCAmelCase_ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(lowerCAmelCase_ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(lowerCAmelCase_ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(lowerCAmelCase_ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , lowerCAmelCase_ ) # measure the last two qubits
lowerCAmelCase_ : Tuple = qiskit.Aer.get_backend('''aer_simulator''' )
lowerCAmelCase_ : Union[str, Any] = qiskit.execute(lowerCAmelCase_ , lowerCAmelCase_ , shots=1_000 )
return job.result().get_counts(lowerCAmelCase_ )
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""") | 619 | 1 |
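# A classical cross-check sketch (the helper below is hypothetical, not part of
# the snippet above):
def classical_full_adder(bit_a: int, bit_b: int, carry_in: int) -> tuple:
    total = bit_a + bit_b + carry_in
    return total % 2, total // 2  # (sum_bit, carry_out)
# classical_full_adder(1, 1, 1) == (1, 1), i.e. the '11' measurement that should
# dominate the counts printed for quantum_full_adder(1, 1, 1).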
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class snake_case__( UpperCAmelCase__, UpperCAmelCase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowercase = 1_2_8 , __lowercase = 2_5_6 , __lowercase = 20_00.0 , __lowercase = 7_6_8 , __lowercase = 1_2 , __lowercase = 1_2 , __lowercase = 6_4 , __lowercase = 2_0_4_8 , __lowercase = 0.1 , ) -> List[str]:
super().__init__()
lowerCAmelCase_ : Optional[int] = nn.Sequential(
nn.Linear(__lowercase , d_model * 4 , bias=__lowercase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__lowercase ) , nn.SiLU() , )
lowerCAmelCase_ : Tuple = nn.Embedding(__lowercase , __lowercase )
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : List[Any] = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
lowerCAmelCase_ : Tuple = nn.Dropout(p=__lowercase )
lowerCAmelCase_ : str = nn.ModuleList()
for lyr_num in range(__lowercase ):
# FiLM conditional T5 decoder
lowerCAmelCase_ : str = DecoderLayer(d_model=__lowercase , d_kv=__lowercase , num_heads=__lowercase , d_ff=__lowercase , dropout_rate=__lowercase )
self.decoders.append(__lowercase )
lowerCAmelCase_ : str = TaLayerNorm(__lowercase )
lowerCAmelCase_ : str = nn.Dropout(p=__lowercase )
lowerCAmelCase_ : Optional[Any] = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
def lowercase_ ( self , __lowercase , __lowercase ) -> str:
lowerCAmelCase_ : int = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
lowerCAmelCase_ : str = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
lowerCAmelCase_ : Optional[int] = self.conditioning_emb(__lowercase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
lowerCAmelCase_ : int = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
lowerCAmelCase_ : Union[str, Any] = torch.broadcast_to(
torch.arange(__lowercase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
lowerCAmelCase_ : List[str] = self.position_encoding(__lowercase )
lowerCAmelCase_ : Dict = self.continuous_inputs_projection(__lowercase )
inputs += position_encodings
lowerCAmelCase_ : List[str] = self.dropout(__lowercase )
# decoder: No padding present.
lowerCAmelCase_ : List[str] = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
lowerCAmelCase_ : Tuple = [(x, self.encoder_decoder_mask(__lowercase , __lowercase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
lowerCAmelCase_ : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
lowerCAmelCase_ : Dict = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
lowerCAmelCase_ : Any = lyr(
__lowercase , conditioning_emb=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , )[0]
lowerCAmelCase_ : int = self.decoder_norm(__lowercase )
lowerCAmelCase_ : Tuple = self.post_dropout(__lowercase )
lowerCAmelCase_ : Any = self.spec_out(__lowercase )
return spec_out
class snake_case__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=1e-6 ) -> Union[str, Any]:
super().__init__()
lowerCAmelCase_ : str = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__lowercase , d_kv=__lowercase , num_heads=__lowercase , dropout_rate=__lowercase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__lowercase , d_kv=__lowercase , num_heads=__lowercase , dropout_rate=__lowercase , layer_norm_epsilon=__lowercase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__lowercase , d_ff=__lowercase , dropout_rate=__lowercase , layer_norm_epsilon=__lowercase ) )
def lowercase_ ( self , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> Dict:
lowerCAmelCase_ : Any = self.layer[0](
__lowercase , conditioning_emb=__lowercase , attention_mask=__lowercase , )
if encoder_hidden_states is not None:
lowerCAmelCase_ : List[str] = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
lowerCAmelCase_ : List[Any] = self.layer[1](
__lowercase , key_value_states=__lowercase , attention_mask=__lowercase , )
# Apply Film Conditional Feed Forward layer
lowerCAmelCase_ : Optional[Any] = self.layer[-1](__lowercase , __lowercase )
return (hidden_states,)
class snake_case__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]:
super().__init__()
lowerCAmelCase_ : Optional[int] = TaLayerNorm(__lowercase )
lowerCAmelCase_ : Dict = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowercase )
lowerCAmelCase_ : List[str] = Attention(query_dim=__lowercase , heads=__lowercase , dim_head=__lowercase , out_bias=__lowercase , scale_qk=__lowercase )
lowerCAmelCase_ : Optional[Any] = nn.Dropout(__lowercase )
def lowercase_ ( self , __lowercase , __lowercase=None , __lowercase=None , ) -> Union[str, Any]:
# pre_self_attention_layer_norm
lowerCAmelCase_ : int = self.layer_norm(__lowercase )
if conditioning_emb is not None:
lowerCAmelCase_ : Dict = self.FiLMLayer(__lowercase , __lowercase )
# Self-attention block
lowerCAmelCase_ : List[Any] = self.attention(__lowercase )
lowerCAmelCase_ : str = hidden_states + self.dropout(__lowercase )
return hidden_states
class snake_case__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Dict:
super().__init__()
lowerCAmelCase_ : Any = Attention(query_dim=__lowercase , heads=__lowercase , dim_head=__lowercase , out_bias=__lowercase , scale_qk=__lowercase )
lowerCAmelCase_ : List[str] = TaLayerNorm(__lowercase , eps=__lowercase )
lowerCAmelCase_ : Union[str, Any] = nn.Dropout(__lowercase )
def lowercase_ ( self , __lowercase , __lowercase=None , __lowercase=None , ) -> Any:
lowerCAmelCase_ : Any = self.layer_norm(__lowercase )
lowerCAmelCase_ : Union[str, Any] = self.attention(
__lowercase , encoder_hidden_states=__lowercase , attention_mask=attention_mask.squeeze(1 ) , )
lowerCAmelCase_ : Tuple = hidden_states + self.dropout(__lowercase )
return layer_output
class snake_case__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[int]:
super().__init__()
lowerCAmelCase_ : Dict = TaDenseGatedActDense(d_model=__lowercase , d_ff=__lowercase , dropout_rate=__lowercase )
lowerCAmelCase_ : Any = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowercase )
lowerCAmelCase_ : Dict = TaLayerNorm(__lowercase , eps=__lowercase )
lowerCAmelCase_ : Optional[Any] = nn.Dropout(__lowercase )
def lowercase_ ( self , __lowercase , __lowercase=None ) -> Optional[int]:
lowerCAmelCase_ : Any = self.layer_norm(__lowercase )
if conditioning_emb is not None:
lowerCAmelCase_ : Tuple = self.film(__lowercase , __lowercase )
lowerCAmelCase_ : int = self.DenseReluDense(__lowercase )
lowerCAmelCase_ : List[str] = hidden_states + self.dropout(__lowercase )
return hidden_states
class snake_case__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase ) -> Union[str, Any]:
super().__init__()
lowerCAmelCase_ : Any = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
lowerCAmelCase_ : int = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
lowerCAmelCase_ : Dict = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
lowerCAmelCase_ : Tuple = nn.Dropout(__lowercase )
lowerCAmelCase_ : Optional[int] = NewGELUActivation()
def lowercase_ ( self , __lowercase ) -> List[str]:
lowerCAmelCase_ : List[Any] = self.act(self.wi_a(__lowercase ) )
lowerCAmelCase_ : List[Any] = self.wi_a(__lowercase )
lowerCAmelCase_ : int = hidden_gelu * hidden_linear
lowerCAmelCase_ : Dict = self.dropout(__lowercase )
lowerCAmelCase_ : Optional[Any] = self.wo(__lowercase )
return hidden_states
class snake_case__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=1e-6 ) -> List[Any]:
super().__init__()
lowerCAmelCase_ : Union[str, Any] = nn.Parameter(torch.ones(__lowercase ) )
lowerCAmelCase_ : Dict = eps
def lowercase_ ( self , __lowercase ) -> Dict:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
lowerCAmelCase_ : str = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__lowercase )
lowerCAmelCase_ : Optional[int] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
lowerCAmelCase_ : List[Any] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class snake_case__( nn.Module ):
'''simple docstring'''
def lowercase_ ( self , __lowercase ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(__lowercase , 3.0 )) ))
class snake_case__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase ) -> Union[str, Any]:
super().__init__()
lowerCAmelCase_ : Any = nn.Linear(__lowercase , out_features * 2 , bias=__lowercase )
def lowercase_ ( self , __lowercase , __lowercase ) -> Tuple:
lowerCAmelCase_ : Optional[int] = self.scale_bias(__lowercase )
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = torch.chunk(__lowercase , 2 , -1 )
lowerCAmelCase_ : Dict = x * (1 + scale) + shift
return x | 619 |
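# Note on the layer above: it applies FiLM (feature-wise linear modulation).
# The linear projection maps the conditioning embedding to (scale, shift), and
# the output is x * (1 + scale) + shift; assuming the projection has no bias,
# a zero conditioning signal leaves x unchanged.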
import re
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
lowerCAmelCase_ : Tuple = re.compile(r'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
if match := re.search(lowerCAmelCase_ , lowerCAmelCase_ ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator("""+918827897895""")) | 619 | 1 |
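# A usage sketch for the validator above (numbers are made up):
# indian_phone_validator("+918827897895")  # True: optional +91, then ten digits starting 7/8/9
# indian_phone_validator("+911234567890")  # False: first subscriber digit not in [789]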
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = ShapEImgaImgPipeline
SCREAMING_SNAKE_CASE__ : List[str] = ["""image"""]
SCREAMING_SNAKE_CASE__ : List[str] = ["""image"""]
SCREAMING_SNAKE_CASE__ : int = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
SCREAMING_SNAKE_CASE__ : str = False
@property
def lowercase_ ( self ) -> Any:
return 3_2
@property
def lowercase_ ( self ) -> List[str]:
return 3_2
@property
def lowercase_ ( self ) -> Optional[int]:
return self.time_input_dim * 4
@property
def lowercase_ ( self ) -> Dict:
return 8
@property
def lowercase_ ( self ) -> int:
torch.manual_seed(0 )
lowerCAmelCase_ : List[str] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=6_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowerCAmelCase_ : int = CLIPVisionModel(__lowercase )
return model
@property
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Union[str, Any] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=2_2_4 , )
return image_processor
@property
def lowercase_ ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase_ : Optional[int] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_6,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 3_2,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowerCAmelCase_ : int = PriorTransformer(**__lowercase )
return model
@property
def lowercase_ ( self ) -> Dict:
torch.manual_seed(0 )
lowerCAmelCase_ : int = {
'''param_shapes''': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 1_2,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowerCAmelCase_ : Tuple = ShapERenderer(**__lowercase )
return model
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ : Optional[Any] = self.dummy_prior
lowerCAmelCase_ : int = self.dummy_image_encoder
lowerCAmelCase_ : Optional[Any] = self.dummy_image_processor
lowerCAmelCase_ : List[str] = self.dummy_renderer
lowerCAmelCase_ : Optional[Any] = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1_0_2_4 , prediction_type='''sample''' , use_karras_sigmas=__lowercase , clip_sample=__lowercase , clip_sample_range=1.0 , )
lowerCAmelCase_ : Tuple = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def lowercase_ ( self , __lowercase , __lowercase=0 ) -> Optional[Any]:
lowerCAmelCase_ : int = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__lowercase ) ).to(__lowercase )
if str(__lowercase ).startswith('''mps''' ):
lowerCAmelCase_ : Union[str, Any] = torch.manual_seed(__lowercase )
else:
lowerCAmelCase_ : Any = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
lowerCAmelCase_ : Dict = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 3_2,
'''output_type''': '''np''',
}
return inputs
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : Optional[int] = '''cpu'''
lowerCAmelCase_ : Tuple = self.get_dummy_components()
lowerCAmelCase_ : Any = self.pipeline_class(**__lowercase )
lowerCAmelCase_ : Optional[int] = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
lowerCAmelCase_ : Union[str, Any] = pipe(**self.get_dummy_inputs(__lowercase ) )
lowerCAmelCase_ : Optional[int] = output.images[0]
lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
lowerCAmelCase_ : str = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> Optional[int]:
        # NOTE: Larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Any = torch_device == '''cpu'''
lowerCAmelCase_ : str = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__lowercase , relax_max_difference=__lowercase , )
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : Union[str, Any] = self.get_dummy_components()
lowerCAmelCase_ : Any = self.pipeline_class(**__lowercase )
lowerCAmelCase_ : Optional[Any] = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
lowerCAmelCase_ : str = 1
lowerCAmelCase_ : int = 2
lowerCAmelCase_ : Tuple = self.get_dummy_inputs(__lowercase )
for key in inputs.keys():
if key in self.batch_params:
lowerCAmelCase_ : Dict = batch_size * [inputs[key]]
lowerCAmelCase_ : List[Any] = pipe(**__lowercase , num_images_per_prompt=__lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowerCAmelCase_ : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowerCAmelCase_ : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowerCAmelCase_ : Tuple = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
lowerCAmelCase_ : str = torch.Generator(device=__lowercase ).manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = pipe(
__lowercase , generator=__lowercase , guidance_scale=3.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='''np''' , ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(__lowercase , __lowercase ) | 619 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCAmelCase : Any =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ["""pixel_values"""]
def __init__( self , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 2_5_5 , __lowercase = True , __lowercase = None , __lowercase = None , **__lowercase , ) -> None:
super().__init__(**__lowercase )
lowerCAmelCase_ : Dict = size if size is not None else {'''shortest_edge''': 3_8_4}
lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
lowerCAmelCase_ : List[Any] = do_resize
lowerCAmelCase_ : Optional[int] = size
# Default value set here for backwards compatibility where the value in config is None
lowerCAmelCase_ : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowerCAmelCase_ : Tuple = resample
lowerCAmelCase_ : Optional[int] = do_rescale
lowerCAmelCase_ : Any = rescale_factor
lowerCAmelCase_ : List[str] = do_normalize
lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ) -> np.ndarray:
lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
if "shortest_edge" not in size:
raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
lowerCAmelCase_ : Optional[int] = size['''shortest_edge''']
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCAmelCase_ : Optional[Any] = int(shortest_edge / crop_pct )
lowerCAmelCase_ : List[str] = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase )
lowerCAmelCase_ : List[Any] = resize(image=__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__lowercase , size=(shortest_edge, shortest_edge) , data_format=__lowercase , **__lowercase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__lowercase , size=(shortest_edge, shortest_edge) , resample=__lowercase , data_format=__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> Any:
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image:
lowerCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ : Any = crop_pct if crop_pct is not None else self.crop_pct
lowerCAmelCase_ : str = resample if resample is not None else self.resample
lowerCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ : int = image_std if image_std is not None else self.image_std
lowerCAmelCase_ : int = size if size is not None else self.size
lowerCAmelCase_ : List[str] = get_size_dict(__lowercase , default_to_square=__lowercase )
lowerCAmelCase_ : Tuple = make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowerCAmelCase_ : Optional[Any] = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
lowerCAmelCase_ : Union[str, Any] = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images]
if do_rescale:
lowerCAmelCase_ : Any = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
lowerCAmelCase_ : List[Any] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
lowerCAmelCase_ : Optional[Any] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
lowerCAmelCase_ : Dict = {'''pixel_values''': images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase ) | 619 | 1 |
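# A usage sketch, assuming this mirrors transformers' ConvNextImageProcessor,
# whose resize/crop_pct logic the snippet matches (names are illustrative):
# processor = ConvNextImageProcessor(size={"shortest_edge": 224}, crop_pct=224 / 256)
# out = processor(images=pil_image, return_tensors="np")
# out["pixel_values"]  # shortest edge resized to 224 / crop_pct = 256, then center-cropped to 224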
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_UpperCAmelCase : int =logging.get_logger(__name__)
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Dict:
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
lowerCAmelCase_ : str = to_pil_image(lowerCAmelCase_ )
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = pil_image.size
lowerCAmelCase_ : Any = pytesseract.image_to_data(lowerCAmelCase_ , lang=lowerCAmelCase_ , output_type='''dict''' , config=lowerCAmelCase_ )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Dict = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
lowerCAmelCase_ : Dict = [idx for idx, word in enumerate(lowerCAmelCase_ ) if not word.strip()]
lowerCAmelCase_ : int = [word for idx, word in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
lowerCAmelCase_ : Tuple = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
lowerCAmelCase_ : List[Any] = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
lowerCAmelCase_ : Optional[int] = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
lowerCAmelCase_ : int = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowerCAmelCase_ : str = []
for x, y, w, h in zip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
lowerCAmelCase_ : str = [x, y, x + w, y + h]
actual_boxes.append(lowerCAmelCase_ )
# finally, normalize the bounding boxes
lowerCAmelCase_ : Optional[Any] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) )
assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = ["""pixel_values"""]
def __init__( self , __lowercase = True , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 2_5_5 , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = True , __lowercase = None , __lowercase = "" , **__lowercase , ) -> None:
super().__init__(**__lowercase )
lowerCAmelCase_ : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
lowerCAmelCase_ : Any = get_size_dict(__lowercase )
lowerCAmelCase_ : Dict = do_resize
lowerCAmelCase_ : Optional[Any] = size
lowerCAmelCase_ : Optional[Any] = resample
lowerCAmelCase_ : Optional[int] = do_rescale
lowerCAmelCase_ : Union[str, Any] = rescale_value
lowerCAmelCase_ : List[Any] = do_normalize
lowerCAmelCase_ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
lowerCAmelCase_ : Tuple = apply_ocr
lowerCAmelCase_ : Dict = ocr_lang
lowerCAmelCase_ : Union[str, Any] = tesseract_config
def lowercase_ ( self , __lowercase , __lowercase , __lowercase = PILImageResampling.BILINEAR , __lowercase = None , **__lowercase , ) -> np.ndarray:
lowerCAmelCase_ : Union[str, Any] = get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
lowerCAmelCase_ : List[str] = (size['''height'''], size['''width'''])
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase=None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image:
lowerCAmelCase_ : List[Any] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ : List[str] = size if size is not None else self.size
lowerCAmelCase_ : str = get_size_dict(__lowercase )
lowerCAmelCase_ : str = resample if resample is not None else self.resample
lowerCAmelCase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ : Optional[int] = image_std if image_std is not None else self.image_std
lowerCAmelCase_ : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
lowerCAmelCase_ : Dict = ocr_lang if ocr_lang is not None else self.ocr_lang
lowerCAmelCase_ : int = tesseract_config if tesseract_config is not None else self.tesseract_config
lowerCAmelCase_ : int = make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
lowerCAmelCase_ : List[Any] = [to_numpy_array(__lowercase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : Any = []
for image in images:
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = apply_tesseract(__lowercase , __lowercase , __lowercase )
words_batch.append(__lowercase )
boxes_batch.append(__lowercase )
if do_resize:
lowerCAmelCase_ : Union[str, Any] = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images]
if do_rescale:
lowerCAmelCase_ : List[Any] = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
lowerCAmelCase_ : List[str] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
lowerCAmelCase_ : Union[str, Any] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
lowerCAmelCase_ : Union[str, Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=__lowercase )
if apply_ocr:
lowerCAmelCase_ : str = words_batch
lowerCAmelCase_ : str = boxes_batch
return data | 619 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[int] =logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] ={
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = """gpt_neox_japanese"""
def __init__( self , __lowercase=3_2_0_0_0 , __lowercase=2_5_6_0 , __lowercase=3_2 , __lowercase=3_2 , __lowercase=4 , __lowercase="gelu" , __lowercase=1.00 , __lowercase=1_0_0_0_0 , __lowercase=2_0_4_8 , __lowercase=0.02 , __lowercase=1e-5 , __lowercase=True , __lowercase=3_1_9_9_6 , __lowercase=3_1_9_9_9 , __lowercase=0.1 , __lowercase=0.0 , **__lowercase , ) -> str:
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
lowerCAmelCase_ : Optional[Any] = vocab_size
lowerCAmelCase_ : Tuple = max_position_embeddings
lowerCAmelCase_ : Optional[Any] = hidden_size
lowerCAmelCase_ : Optional[Any] = num_hidden_layers
lowerCAmelCase_ : str = num_attention_heads
lowerCAmelCase_ : str = intermediate_multiple_size
lowerCAmelCase_ : str = hidden_act
lowerCAmelCase_ : Dict = rotary_pct
lowerCAmelCase_ : Union[str, Any] = rotary_emb_base
lowerCAmelCase_ : int = initializer_range
lowerCAmelCase_ : Any = layer_norm_eps
lowerCAmelCase_ : Optional[Any] = use_cache
lowerCAmelCase_ : Tuple = attention_dropout
lowerCAmelCase_ : Dict = hidden_dropout | 619 | 1 |
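# A usage sketch, assuming this mirrors transformers' GPTNeoXJapaneseConfig
# (the instantiation below is illustrative):
# config = GPTNeoXJapaneseConfig(num_hidden_layers=16, hidden_dropout=0.1)
# config.intermediate_multiple_size  # defaults to 4: the FFN width is a
# multiple of hidden_size rather than a fixed intermediate_size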