| code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_flax_cross_test,
    require_flax,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available

from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester


if is_flax_available():
    from transformers import (
        FlaxBertModel,
        FlaxCLIPVisionModel,
        FlaxVisionTextDualEncoderModel,
        FlaxViTModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderProcessor,
    )
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_available():
    import torch

    from transformers import VisionTextDualEncoderModel

if is_vision_available():
    from PIL import Image


def to_2tuple(x):
    # Expand a scalar into a (x, x) pair; pass iterables (e.g. (h, w)) through unchanged.
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
import inspect
import unittest

from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)

        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])

        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import logging
import random

import ray

from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


logger = logging.getLogger(__name__)


class RayRetriever:
    """A lightweight wrapper that lazily builds a RagRetriever inside a Ray actor."""

    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    """A RagRetriever that delegates retrieval to a pool of Ray actors."""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")

        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
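A minimal wiring sketch (illustrative, not part of the file above), assuming the actor setup used by the RAG fine-tuning example, where `RayRetriever` is wrapped with `ray.remote`; the checkpoint name and worker count here are assumptions:

import ray

ray.init()

# Spawn a few retrieval actors (the count is illustrative).
RemoteRetriever = ray.remote(RayRetriever)
workers = [RemoteRetriever.remote() for _ in range(2)]

retriever = RagRayDistributedRetriever.from_pretrained(
    "facebook/rag-token-nq",  # assumed checkpoint name
    actor_handles=workers,
)
retriever.init_retrieval()  # initializes the index on every worker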
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    # Stacks compare by their top (last) element, so bisect can place new cards.
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
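A quick usage sketch (illustrative, separate from the module above): patience_sort sorts in place and also returns the same list.

example = [5, 1, 4, 2, 3]
assert patience_sort(example) == [1, 2, 3, 4, 5]
assert example == [1, 2, 3, 4, 5]  # mutated in place via collection[:] = merge(...)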
import unittest

from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21,
            11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0,
            258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217,
            230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117,
            86, 93, 176, 183, 270, 11, 262, 42, 61, 265,
        ]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
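A minimal usage sketch, grounded in the slow tests above (the ids shown are the ones asserted in test_tokenization_base_easy_symbols):

from transformers import ReformerTokenizer

tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tokenizer.encode("Hello World!")
print(ids)                    # [126, 32, 262, 152, 38, 72, 287], per the test above
print(tokenizer.decode(ids))  # round-trips back to the input text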
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
import copy
import tempfile
import unittest

from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError

from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperature: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperature
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                # Metropolis acceptance probability: e^(change / temperature)
                probability = math.exp(change / current_temp)
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    flip_channel_order,
    get_resize_output_image_size,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging


if is_vision_available():
    import PIL

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
_A: List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 358
|
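# --- Hedged illustration (not part of the dataset row above) ---
# The preceding row is a MobileViT-style image processor: resize to a shortest
# edge, center-crop, rescale, then flip RGB to BGR because the pretrained
# checkpoints expect BGR. A minimal numpy sketch of the same order; sizes are
# illustrative and this is not the library implementation.
import numpy as np

def shortest_edge_size(h, w, shortest_edge=224):
    # Scale so the shorter side equals `shortest_edge`, keeping aspect ratio.
    scale = shortest_edge / min(h, w)
    return int(round(h * scale)), int(round(w * scale))

img = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
print(shortest_edge_size(*img.shape[:2]))  # -> (224, 299)
bgr = img[..., ::-1]                       # the flip_channel_order step
scaled = bgr.astype(np.float32) / 255.0    # the rescale step (factor 1/255)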
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[Any] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase__ : Tuple = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase__ : Optional[int] = {'facebook/blenderbot_small-90M': 512}
def lowerCamelCase__ ( a ) -> Optional[Any]:
_A: List[Any] = set()
_A: List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A: List[Any] = char
_A: Union[str, Any] = set(a )
return pairs
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : str = VOCAB_FILES_NAMES
__UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str]="__start__" , lowerCAmelCase_ : Any="__end__" , lowerCAmelCase_ : Any="__unk__" , lowerCAmelCase_ : Any="__null__" , **lowerCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding='''utf-8''' ) as vocab_handle:
_A: Optional[int] = json.load(lowerCAmelCase_ )
_A: int = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding='''utf-8''' ) as merges_handle:
_A: Dict = merges_handle.read().split('''\n''' )[1:-1]
_A: int = [tuple(merge.split() ) for merge in merges]
_A: Dict = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A: Union[str, Any] = {}
@property
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
return len(self.encoder )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ ( self : str , lowerCAmelCase_ : str ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_A: List[Any] = re.sub('''([.,!?()])''' , R''' \1''' , lowerCAmelCase_ )
_A: List[Any] = re.sub('''(\')''' , R''' \1 ''' , lowerCAmelCase_ )
_A: List[Any] = re.sub(R'''\s{2,}''' , ''' ''' , lowerCAmelCase_ )
if "\n" in token:
_A: Dict = token.replace('''\n''' , ''' __newln__''' )
_A: Any = token.split(''' ''' )
_A: Optional[Any] = []
for token in tokens:
if not len(lowerCAmelCase_ ):
continue
_A: str = token.lower()
_A: List[str] = tuple(lowerCAmelCase_ )
_A: str = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
_A: Dict = get_pairs(lowerCAmelCase_ )
if not pairs:
words.append(lowerCAmelCase_ )
continue
while True:
_A: str = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A: Optional[int] = bigram
_A: str = []
_A: Dict = 0
while i < len(lowerCAmelCase_ ):
try:
_A: List[Any] = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
new_word.extend(word[i:j] )
_A: Optional[int] = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A: Union[str, Any] = tuple(lowerCAmelCase_ )
_A: Tuple = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_A: Optional[int] = get_pairs(lowerCAmelCase_ )
_A: str = '''@@ '''.join(lowerCAmelCase_ )
_A: Tuple = word[:-4]
_A: List[Any] = word
words.append(lowerCAmelCase_ )
return " ".join(lowerCAmelCase_ )
def __magic_name__ ( self : str , lowerCAmelCase_ : str ):
"""simple docstring"""
_A: List[Any] = []
_A: List[Any] = re.findall(R'''\S+\n?''' , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(''' ''' ) ) )
return split_tokens
def __magic_name__ ( self : str , lowerCAmelCase_ : str ):
"""simple docstring"""
_A: List[str] = token.lower()
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self : int , lowerCAmelCase_ : int ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
_A: List[str] = ''' '''.join(lowerCAmelCase_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __magic_name__ ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A: Dict = os.path.join(
lowerCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_A: Any = os.path.join(
lowerCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '''\n''' )
_A: List[str] = 0
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
_A: Optional[int] = token_index
writer.write(''' '''.join(lowerCAmelCase_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
| 301
| 0
|
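# --- Hedged illustration (not part of the dataset row above) ---
# Toy walk-through of the BPE merge loop in the tokenizer above: `get_pairs`
# collects adjacent symbol pairs, and the best-ranked pair is merged until no
# ranked pair remains. The `ranks` table here is made up for illustration.
def toy_get_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}

ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}
word = ("l", "o", "w</w>")
while True:
    pairs = toy_get_pairs(word)
    if not pairs:
        break
    best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
    if best not in ranks:
        break
    first, second = best
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            merged.append(first + second)  # apply the ranked merge
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)
print(word)  # ('low</w>',)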
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : int = '''EncodecFeatureExtractor'''
__UpperCamelCase : Dict = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str ):
"""simple docstring"""
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
_A: Optional[Any] = self.feature_extractor
_A: Dict = False
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Any=True ):
"""simple docstring"""
return self.tokenizer.get_decoder_prompt_ids(task=lowerCAmelCase_ , language=lowerCAmelCase_ , no_timestamps=lowerCAmelCase_ )
def __call__( self : Dict , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Dict ):
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase_ , **lowerCAmelCase_ )
_A: Optional[int] = kwargs.pop('''audio''' , lowerCAmelCase_ )
_A: List[str] = kwargs.pop('''sampling_rate''' , lowerCAmelCase_ )
_A: Dict = kwargs.pop('''text''' , lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
_A: Union[str, Any] = args[0]
_A: Tuple = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
_A: Tuple = self.tokenizer(lowerCAmelCase_ , **lowerCAmelCase_ )
if audio is not None:
_A: int = self.feature_extractor(lowerCAmelCase_ , *lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , **lowerCAmelCase_ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
_A: Optional[Any] = audio_inputs['input_values']
if "padding_mask" in audio_inputs:
_A: Optional[Any] = audio_inputs['padding_mask']
return inputs
def __magic_name__ ( self : Tuple , *lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
_A: str = kwargs.pop('''audio''' , lowerCAmelCase_ )
_A: Any = kwargs.pop('''padding_mask''' , lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
_A: Union[str, Any] = args[0]
_A: List[str] = args[1:]
if audio_values is not None:
return self._decode_audio(lowerCAmelCase_ , padding_mask=lowerCAmelCase_ )
else:
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __magic_name__ ( self : Tuple , *lowerCAmelCase_ : str , **lowerCAmelCase_ : Any ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional = None ):
"""simple docstring"""
_A: Dict = to_numpy(lowerCAmelCase_ )
_A: Union[str, Any] = audio_values.shape
if padding_mask is None:
return list(lowerCAmelCase_ )
_A: Any = to_numpy(lowerCAmelCase_ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
_A: Dict = seq_len - padding_mask.shape[-1]
_A: Tuple = 1 - self.feature_extractor.padding_value
_A: List[Any] = np.pad(lowerCAmelCase_ , ((0, 0), (0, difference)) , '''constant''' , constant_values=lowerCAmelCase_ )
_A: int = audio_values.tolist()
for i in range(lowerCAmelCase_ ):
_A: Dict = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
_A: Any = sliced_audio.reshape(lowerCAmelCase_ , -1 )
return audio_values
| 359
|
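# --- Hedged illustration (not part of the dataset row above) ---
# A small numpy sketch of the `_decode_audio` padding trick above: the mask is
# right-padded with the *non*-padding value so samples generated beyond the
# original mask length are kept, then each row is sliced to its real audio.
# Values are fabricated; in the processor the padding value comes from the
# feature extractor.
import numpy as np

padding_value = 0.0
audio = np.arange(12, dtype=np.float32).reshape(2, 6)  # (batch, seq_len)
mask = np.array([[1, 1, 1, 1], [1, 1, 0, 0]])          # shorter than seq_len
difference = audio.shape[-1] - mask.shape[-1]
mask = np.pad(mask, ((0, 0), (0, difference)), "constant", constant_values=1 - padding_value)
sliced = [row[m != padding_value] for row, m in zip(audio, mask)]
print([s.shape for s in sliced])  # [(6,), (4,)]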
import os
from pathlib import Path
def lowerCamelCase__ ( ) -> Optional[Any]:
from torch.utils.cpp_extension import load
_A: str = Path(a ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
_A: Tuple = [
root / filename
for filename in [
'''vision.cpp''',
os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
]
]
load(
'''MultiScaleDeformableAttention''' , a , with_cuda=a , extra_include_paths=[str(a )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 301
| 0
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase ( _lowercase ):
'''simple docstring'''
def __init__( self : Optional[Any] , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''' , FutureWarning , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
| 360
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = ['''image_processor''', '''tokenizer''']
__UpperCamelCase : Optional[Any] = '''BlipImageProcessor'''
__UpperCamelCase : int = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
_A: Optional[Any] = False
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
_A: List[Any] = self.image_processor
def __call__( self : Optional[Any] , lowerCAmelCase_ : ImageInput = None , lowerCAmelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase_ : Union[bool, str, TruncationStrategy] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
_A: Tuple = self.tokenizer
_A: Optional[int] = self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
return text_encoding
# add pixel_values
_A: List[Any] = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
if text is not None:
_A: Tuple = self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
else:
_A: str = None
if text_encoding is not None:
encoding_image_processor.update(lowerCAmelCase_ )
return encoding_image_processor
def __magic_name__ ( self : Optional[Any] , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Tuple ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __magic_name__ ( self : Union[str, Any] , *lowerCAmelCase_ : int , **lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: Dict = self.tokenizer.model_input_names
_A: List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 301
| 0
|
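# --- Hedged illustration (not part of the dataset row above) ---
# Usage sketch of the dual-mode __call__ above: text-only returns the
# tokenizer encoding, image-only returns pixel values, and both together are
# merged into one encoding. The checkpoint name is illustrative, not taken
# from the snippet.
import numpy as np
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
enc = processor(images=image, text="a photo of", return_tensors="pt")
print(sorted(enc.keys()))  # input_ids, attention_mask and pixel_values together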
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCAmelCase ( __lowercase ):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = '''openai/whisper-base'''
__UpperCamelCase : str = (
'''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
'''transcribed text.'''
)
__UpperCamelCase : Dict = '''transcriber'''
__UpperCamelCase : Any = WhisperProcessor
__UpperCamelCase : Union[str, Any] = WhisperForConditionalGeneration
__UpperCamelCase : Union[str, Any] = ['''audio''']
__UpperCamelCase : Optional[int] = ['''text''']
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
return self.pre_processor(lowerCAmelCase_ , return_tensors='''pt''' ).input_features
def __magic_name__ ( self : int , lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
return self.model.generate(inputs=lowerCAmelCase_ )
def __magic_name__ ( self : Any , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
return self.pre_processor.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )[0]
| 361
|
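# --- Hedged illustration (not part of the dataset row above) ---
# The transcriber tool above follows PipelineTool's encode -> forward -> decode
# contract. A stand-alone sketch of the same flow with the underlying classes;
# the audio array is fabricated silence, and 16 kHz is Whisper's expected rate.
import numpy as np
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence
features = processor(audio, sampling_rate=16_000, return_tensors="pt").input_features
tokens = model.generate(inputs=features)
print(processor.batch_decode(tokens, skip_special_tokens=True)[0])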
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = '''mobilenet_v1'''
def __init__( self : Optional[int] , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : str=2_2_4 , lowerCAmelCase_ : List[str]=1.0 , lowerCAmelCase_ : Any=8 , lowerCAmelCase_ : Tuple="relu6" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[int]=0.999 , lowerCAmelCase_ : List[str]=0.02 , lowerCAmelCase_ : List[Any]=0.001 , **lowerCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
_A: Any = num_channels
_A: Optional[int] = image_size
_A: Optional[Any] = depth_multiplier
_A: Tuple = min_depth
_A: Any = hidden_act
_A: Dict = tf_padding
_A: List[Any] = classifier_dropout_prob
_A: Tuple = initializer_range
_A: Tuple = layer_norm_eps
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Dict = version.parse('''1.11''' )
@property
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def __magic_name__ ( self : Dict ):
"""simple docstring"""
return 1e-4
| 301
| 0
|
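# --- Hedged illustration (not part of the dataset row above) ---
# Sketch under the real library names: the classes above correspond to
# MobileNetV1Config and its ONNX config. The second import path is assumed.
from transformers import MobileNetV1Config
from transformers.models.mobilenet_v1.configuration_mobilenet_v1 import MobileNetV1OnnxConfig

config = MobileNetV1Config(depth_multiplier=1.0, image_size=224)
onnx_config = MobileNetV1OnnxConfig(config)
print(onnx_config.inputs)               # OrderedDict([('pixel_values', {0: 'batch'})])
print(onnx_config.atol_for_validation)  # 1e-4, the last property above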
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
UpperCAmelCase__ : int = HfArgumentParser(InitializationArguments)
UpperCAmelCase__ : Union[str, Any] = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
UpperCAmelCase__ : List[str] = {
"vocab_size": len(tokenizer),
"scale_attn_by_inverse_layer_idx": True,
"reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
UpperCAmelCase__ : Union[str, Any] = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
UpperCAmelCase__ : Union[str, Any] = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 362
|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase__ : Any = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCAmelCase__ : Optional[Any] = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def lowerCamelCase__ ( a , a , a ) -> Union[str, Any]:
_A: Optional[int] = SavedModel()
_A: int = []
with open(os.path.join(a , '''utils''' , '''tf_ops''' , '''onnx.json''' ) ) as f:
_A: List[Any] = json.load(a )['''opsets''']
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(a )] )
with open(a , '''rb''' ) as f:
saved_model.ParseFromString(f.read() )
_A: Optional[Any] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
_A: Optional[int] = sorted(a )
_A: Tuple = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(a )
if strict and len(a ) > 0:
raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + '''\n'''.join(incompatible_ops ) )
elif len(a ) > 0:
print(f"""Found the following incompatible ops for the opset {opset}:""" )
print(*a , sep='''\n''' )
else:
print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
UpperCAmelCase__ : int = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 301
| 0
|
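# --- Hedged illustration (not part of the dataset row above) ---
# The compatibility check above reduces to set arithmetic over op names:
# anything in the graph that is neither an ONNX op nor an internal TF op is
# flagged. A toy version with fabricated op names:
model_op_names = {"MatMul", "Relu", "MyCustomOp", "ReadVariableOp"}
onnx_ops = {"MatMul", "Relu"}
internal_ops = {"ReadVariableOp"}
incompatible = sorted(model_op_names - onnx_ops - internal_ops)
print(incompatible)  # ['MyCustomOp']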
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[Any] = {
'b0': efficientnet.EfficientNetB0,
'b1': efficientnet.EfficientNetB1,
'b2': efficientnet.EfficientNetB2,
'b3': efficientnet.EfficientNetB3,
'b4': efficientnet.EfficientNetB4,
'b5': efficientnet.EfficientNetB5,
'b6': efficientnet.EfficientNetB6,
'b7': efficientnet.EfficientNetB7,
}
UpperCAmelCase__ : List[str] = {
'b0': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def lowerCamelCase__ ( a ) -> Any:
_A: List[str] = EfficientNetConfig()
_A: List[Any] = CONFIG_MAP[model_name]['''hidden_dim''']
_A: str = CONFIG_MAP[model_name]['''width_coef''']
_A: Optional[int] = CONFIG_MAP[model_name]['''depth_coef''']
_A: int = CONFIG_MAP[model_name]['''image_size''']
_A: Union[str, Any] = CONFIG_MAP[model_name]['''dropout_rate''']
_A: Dict = CONFIG_MAP[model_name]['''dw_padding''']
_A: str = '''huggingface/label-files'''
_A: Any = '''imagenet-1k-id2label.json'''
_A: Dict = 10_00
_A: int = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='''dataset''' ) , '''r''' ) )
_A: Union[str, Any] = {int(lowercase__ ): v for k, v in idalabel.items()}
_A: List[Any] = idalabel
_A: Optional[int] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( ) -> Tuple:
_A: List[str] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_A: Tuple = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
def lowerCamelCase__ ( a ) -> List[str]:
_A: List[Any] = CONFIG_MAP[model_name]['''image_size''']
_A: Optional[int] = EfficientNetImageProcessor(
size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] , do_center_crop=lowercase__ , )
return preprocessor
def lowerCamelCase__ ( a ) -> Optional[int]:
_A: Union[str, Any] = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
_A: str = sorted(set(lowercase__ ) )
_A: int = len(lowercase__ )
_A: List[Any] = {b: str(lowercase__ ) for b, i in zip(lowercase__ , range(lowercase__ ) )}
_A: List[str] = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
_A: List[str] = block_name_mapping[b]
rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
_A: Union[str, Any] = {}
for item in rename_keys:
if item[0] in original_param_names:
_A: Tuple = '''efficientnet.''' + item[1]
_A: Union[str, Any] = '''classifier.weight'''
_A: Union[str, Any] = '''classifier.bias'''
return key_mapping
def lowerCamelCase__ ( a , a , a ) -> Optional[int]:
for key, value in tf_params.items():
if "normalization" in key:
continue
_A: List[str] = key_mapping[key]
if "_conv" in key and "kernel" in key:
_A: Union[str, Any] = torch.from_numpy(lowercase__ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
_A: Optional[int] = torch.from_numpy(lowercase__ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
_A: Any = torch.from_numpy(np.transpose(lowercase__ ) )
else:
_A: Any = torch.from_numpy(lowercase__ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase__ )
@torch.no_grad()
def lowerCamelCase__ ( a , a , a , a ) -> Optional[int]:
_A: List[str] = model_classes[model_name](
include_top=lowercase__ , weights='''imagenet''' , input_tensor=lowercase__ , input_shape=lowercase__ , pooling=lowercase__ , classes=10_00 , classifier_activation='''softmax''' , )
_A: Tuple = original_model.trainable_variables
_A: Dict = original_model.non_trainable_variables
_A: int = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
_A: Optional[int] = param.numpy()
_A: List[str] = list(tf_params.keys() )
# Load HuggingFace model
_A: List[str] = get_efficientnet_config(lowercase__ )
_A: Tuple = EfficientNetForImageClassification(lowercase__ ).eval()
_A: Optional[Any] = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('''Converting parameters...''' )
_A: Any = rename_keys(lowercase__ )
replace_params(lowercase__ , lowercase__ , lowercase__ )
# Initialize preprocessor and preprocess input image
_A: int = convert_image_processor(lowercase__ )
_A: str = preprocessor(images=prepare_img() , return_tensors='''pt''' )
# HF model inference
hf_model.eval()
with torch.no_grad():
_A: Optional[Any] = hf_model(**lowercase__ )
_A: Optional[int] = outputs.logits.detach().numpy()
# Original model inference
_A: Dict = False
_A: int = CONFIG_MAP[model_name]['''image_size''']
_A: Union[str, Any] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
_A: Dict = image.img_to_array(lowercase__ )
_A: Optional[Any] = np.expand_dims(lowercase__ , axis=0 )
_A: str = original_model.predict(lowercase__ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase__ , lowercase__ , atol=1E-3 ), "The predicted logits are not the same."
print('''Model outputs match!''' )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase__ ):
os.mkdir(lowercase__ )
# Save converted model and image processor
hf_model.save_pretrained(lowercase__ )
preprocessor.save_pretrained(lowercase__ )
if push_to_hub:
# Push model and image processor to hub
print(f"""Pushing converted {model_name} to the hub...""" )
_A: List[str] = f"""efficientnet-{model_name}"""
preprocessor.push_to_hub(lowercase__ )
hf_model.push_to_hub(lowercase__ )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
UpperCAmelCase__ : List[str] = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 363
|
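# --- Hedged illustration (not part of the dataset row above) ---
# The permutes in `replace_params` above are the standard TF -> PyTorch weight
# layout conversions; a toy shape check (kernels are zeros, shapes only):
import numpy as np
import torch

tf_conv = np.zeros((3, 3, 16, 32))   # TF conv kernel: (kh, kw, in, out)
pt_conv = torch.from_numpy(tf_conv).permute(3, 2, 0, 1)
print(tuple(pt_conv.shape))          # (32, 16, 3, 3) = (out, in, kh, kw)

tf_dw = np.zeros((3, 3, 16, 1))      # TF depthwise kernel: (kh, kw, ch, mult)
pt_dw = torch.from_numpy(tf_dw).permute(2, 3, 0, 1)
print(tuple(pt_dw.shape))            # (16, 1, 3, 3)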
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : int = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
UpperCAmelCase__ : str = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
UpperCAmelCase__ : Dict = {
'ctrl': 256,
}
UpperCAmelCase__ : Any = {
'Pregnancy': 168629,
'Christianity': 7675,
'Explain': 106423,
'Fitness': 63440,
'Saving': 63163,
'Ask': 27171,
'Ass': 95985,
'Joke': 163509,
'Questions': 45622,
'Thoughts': 49605,
'Retail': 52342,
'Feminism': 164338,
'Writing': 11992,
'Atheism': 192263,
'Netflix': 48616,
'Computing': 39639,
'Opinion': 43213,
'Alone': 44967,
'Funny': 58917,
'Gaming': 40358,
'Human': 4088,
'India': 1331,
'Joker': 77138,
'Diet': 36206,
'Legal': 11859,
'Norman': 4939,
'Tip': 72689,
'Weight': 52343,
'Movies': 46273,
'Running': 23425,
'Science': 2090,
'Horror': 37793,
'Confession': 60572,
'Finance': 12250,
'Politics': 16360,
'Scary': 191985,
'Support': 12654,
'Technologies': 32516,
'Teenage': 66160,
'Event': 32769,
'Learned': 67460,
'Notion': 182770,
'Wikipedia': 37583,
'Books': 6665,
'Extract': 76050,
'Confessions': 102701,
'Conspiracy': 75932,
'Links': 63674,
'Narcissus': 150425,
'Relationship': 54766,
'Relationships': 134796,
'Reviews': 41671,
'News': 4256,
'Translation': 26820,
'multilingual': 128406,
}
def lowerCamelCase__ ( a ) -> Optional[Any]:
_A: Optional[int] = set()
_A: Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A: Any = char
_A: Dict = set(a )
return pairs
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Any = VOCAB_FILES_NAMES
__UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Optional[int] = CONTROL_CODES
def __init__( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any]="<unk>" , **lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
super().__init__(unk_token=lowerCAmelCase_ , **lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding='''utf-8''' ) as vocab_handle:
_A: str = json.load(lowerCAmelCase_ )
_A: List[Any] = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding='''utf-8''' ) as merges_handle:
_A: int = merges_handle.read().split('''\n''' )[1:-1]
_A: List[Any] = [tuple(merge.split() ) for merge in merges]
_A: List[str] = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A: Union[str, Any] = {}
@property
def __magic_name__ ( self : Any ):
"""simple docstring"""
return len(self.encoder )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_A: List[Any] = tuple(lowerCAmelCase_ )
_A: Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
_A: Optional[int] = get_pairs(lowerCAmelCase_ )
if not pairs:
return token
while True:
_A: Optional[int] = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A: Any = bigram
_A: int = []
_A: int = 0
while i < len(lowerCAmelCase_ ):
try:
_A: Any = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_A: Optional[int] = j
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A: Dict = tuple(lowerCAmelCase_ )
_A: Union[str, Any] = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_A: Tuple = get_pairs(lowerCAmelCase_ )
_A: Optional[int] = '''@@ '''.join(lowerCAmelCase_ )
_A: List[str] = word[:-4]
_A: Optional[Any] = word
return word
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
_A: List[Any] = []
_A: List[str] = re.findall(R'''\S+\n?''' , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(''' ''' ) ) )
return split_tokens
def __magic_name__ ( self : Dict , lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self : Dict , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def __magic_name__ ( self : Any , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
_A: Any = ''' '''.join(lowerCAmelCase_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A: List[str] = os.path.join(
lowerCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_A: List[Any] = os.path.join(
lowerCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '''\n''' )
_A: str = 0
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
_A: Tuple = token_index
writer.write(''' '''.join(lowerCAmelCase_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 301
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: List[Any] = tempfile.mkdtemp()
_A: Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_A: List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_A: List[Any] = {
"do_resize": True,
"size": 2_0,
"do_center_crop": True,
"crop_size": 1_8,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
_A: Tuple = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __magic_name__ ( self : List[Any] , **lowerCAmelCase_ : str ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self : int , **lowerCAmelCase_ : List[str] ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self : Dict , **lowerCAmelCase_ : List[Any] ):
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
_A: List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_A: Any = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A: Dict = self.get_tokenizer()
_A: Optional[int] = self.get_rust_tokenizer()
_A: Any = self.get_image_processor()
_A: Union[str, Any] = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
_A: str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_SCREAMING_SNAKE_CASE )
_A: Any = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
_A: str = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , _SCREAMING_SNAKE_CASE )
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A: Tuple = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_A: Union[str, Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_A: Union[str, Any] = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
_A: Dict = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def __magic_name__ ( self : str ):
"""simple docstring"""
_A: Dict = self.get_image_processor()
_A: List[Any] = self.get_tokenizer()
_A: Optional[int] = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
_A: Tuple = self.prepare_image_inputs()
_A: int = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
_A: str = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
_A: str = self.get_image_processor()
_A: str = self.get_tokenizer()
_A: Tuple = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
_A: Tuple = "lower newer"
_A: int = processor(text=_SCREAMING_SNAKE_CASE )
_A: Optional[Any] = tokenizer(_SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=6_4 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
_A: Any = self.get_image_processor()
_A: str = self.get_tokenizer()
_A: Union[str, Any] = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
_A: Any = "lower newer"
_A: Dict = self.prepare_image_inputs()
_A: Dict = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: Optional[Any] = self.get_image_processor()
_A: Any = self.get_tokenizer()
_A: Optional[Any] = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
_A: Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A: List[Any] = processor.batch_decode(_SCREAMING_SNAKE_CASE )
_A: str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: Dict = self.get_image_processor()
_A: str = self.get_tokenizer()
_A: List[Any] = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
_A: Union[str, Any] = "lower newer"
_A: Optional[int] = self.prepare_image_inputs()
_A: Any = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 364
|
def lowerCamelCase__ ( a = 10 ) -> str:
if not isinstance(a , a ) or n < 0:
raise ValueError('''Invalid input''' )
_A: int = 10**n
_A: List[Any] = 2_84_33 * (pow(2 , 7_83_04_57 , a )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
| 301
| 0
|
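# --- Hedged illustration (not part of the dataset row above) ---
# Why the solution above is fast: three-argument pow does modular
# exponentiation, so only the last n digits of 28433 * 2**7830457 + 1
# (Project Euler 97) are ever materialized.
n = 10
modulus = 10**n
digits = (28433 * pow(2, 7830457, modulus) + 1) % modulus
print(str(digits))  # same value solution(10) returns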
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase__ : Tuple = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_m2m_100 import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
M2M100ForConditionalGeneration,
M2M100Model,
M2M100PreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 365
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class UpperCAmelCase :
'''simple docstring'''
__UpperCamelCase : Any = MBartConfig
__UpperCamelCase : Tuple = {}
__UpperCamelCase : Dict = '''gelu'''
def __init__( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any]=1_3 , lowerCAmelCase_ : Dict=7 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Union[str, Any]=9_9 , lowerCAmelCase_ : Dict=3_2 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : int=4 , lowerCAmelCase_ : Union[str, Any]=3_7 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : List[str]=2_0 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : Optional[int]=1 , lowerCAmelCase_ : List[Any]=0 , ):
"""simple docstring"""
_A: Union[str, Any] = parent
_A: List[Any] = batch_size
_A: Dict = seq_length
_A: Dict = is_training
_A: str = use_labels
_A: int = vocab_size
_A: str = hidden_size
_A: Tuple = num_hidden_layers
_A: Optional[Any] = num_attention_heads
_A: Tuple = intermediate_size
_A: int = hidden_dropout_prob
_A: Tuple = attention_probs_dropout_prob
_A: Tuple = max_position_embeddings
_A: Dict = eos_token_id
_A: int = pad_token_id
_A: Any = bos_token_id
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_A: Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_A: List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
_A: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A: int = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_A: Any = prepare_mbart_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return config, inputs_dict
def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
_A: Tuple = TFMBartModel(config=lowerCAmelCase_ ).get_decoder()
_A: List[str] = inputs_dict['''input_ids''']
_A: Tuple = input_ids[:1, :]
_A: List[Any] = inputs_dict['''attention_mask'''][:1, :]
_A: str = inputs_dict['''head_mask''']
_A: Optional[Any] = 1
# first forward pass
_A: Any = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
_A , _A: List[str] = outputs.to_tuple()
_A: Dict = past_key_values[1]
def lowerCamelCase__ ( a , a , a , a=None , a=None , a=None , a=None , a=None , ) -> Tuple:
if attention_mask is None:
_A: Union[str, Any] = tf.cast(tf.math.not_equal(a , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_A: Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_A: Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_A: Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_A: Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False
    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
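# Usage sketch (added illustration, not part of this module). The directory layout
# below is an assumption for the example; with it, the packaged "audiofolder"
# builder infers labels from the subdirectory names:
#
#     my_audio/
#         train/
#             dog/1.wav
#             cat/2.wav
#
#     from datasets import load_dataset
#     dataset = load_dataset("audiofolder", data_dir="my_audio")
#     print(dataset["train"][0]["audio"], dataset["train"][0]["label"])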
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
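# Illustration (added; not part of the file): with the lazy module installed in
# sys.modules, importing the package is cheap, and the heavy submodules listed in
# _import_structure are only imported when an attribute is first touched, e.g.:
#
#     from transformers.onnx import OnnxConfig  # triggers the real import of .config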
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int):
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)

        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
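

if __name__ == "__main__":
    # Usage sketch (added for illustration). The coefficients below are an arbitrary
    # first-order smoothing filter, not taken from the original file:
    # y[n] = 0.5 * x[n] + 0.5 * x[n-1].
    filt = IIRFilter(1)
    filt.set_coefficients([1.0, 0.0], [0.5, 0.5])
    print([filt.process(x) for x in [1.0, 1.0, 1.0, 0.0]])  # [0.5, 1.0, 1.0, 0.5]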
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
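

if __name__ == "__main__":
    # Usage sketch (added for illustration; not part of the original test file).
    # A minimal denoising loop with DDPMParallelScheduler; the zero "residual"
    # below is a placeholder for a real model call such as model(sample, t).
    scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)  # placeholder for model(sample, t)
        sample = scheduler.step(residual, t, sample).prev_sample
    print(sample.shape)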
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module) -> None:
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img) -> None:
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
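

if __name__ == "__main__":
    # Minimal check (added for illustration): pick a device and stamp the time.
    print(f"[{get_timestamp()}] running on {get_device()}")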
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2

        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)

        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")

        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
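

# Usage sketch (added for illustration; downloading the checkpoint makes this a
# slow, network-dependent example, so it is left commented):
#
#     tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
#     ids = tokenizer.encode("いワ", prefix_text="あン")  # prefix feeds the bidirectional part
#     print(tokenizer.decode(ids))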
def __get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
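
    # Worked example (added for illustration): demo graph 0 is two cycles,
    # {0, 1, 2} and {5, 6, 7, 8}, joined by the chain 2-3-4 and the edge 2-5, so
    # only those chain edges are bridges.
    print(compute_bridges(__get_demo_graph(0)))  # expected [(3, 4), (2, 3), (2, 5)]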
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
debug_launcher(test_script.main )
def __magic_name__ ( self : str ):
"""simple docstring"""
debug_launcher(test_ops.main )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
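    # Worked Luhn example (added for illustration): for 4111111111111111, doubling
    # every second digit from the right turns seven of the 1s into 2 (sum 14) and
    # the leading 4 into 8; the eight untouched 1s add 8, giving 14 + 8 + 8 = 30,
    # which is divisible by 10, so the number passes the check.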
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
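

if __name__ == "__main__":
    # Usage sketch (added for illustration; not part of the original module).
    # Heun takes two step() calls per timestep pair, which is why the timesteps are
    # interleaved above; the zero residual below stands in for a real UNet call.
    scheduler = HeunDiscreteScheduler()
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        residual = torch.zeros_like(model_input)  # placeholder for model(model_input, t)
        sample = scheduler.step(residual, t, sample).prev_sample
    print(sample.shape)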
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image.Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
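

# Usage sketch (added for illustration; running it downloads the BLIP checkpoint,
# so it is left commented; "photo.jpg" is a hypothetical local file):
#
#     from PIL import Image
#     tool = ImageCaptioningTool()
#     caption = tool(Image.open("photo.jpg"))
#     print(caption)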
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
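    # Example (added for illustration): the logistic function maps 0 to 0.5 and
    # squashes large magnitudes toward 0 or 1.
    print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # [0.26894142 0.5 0.73105858]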
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased')
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased').to('cuda:0')
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5')
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5').to('cuda:0')
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth')
        sas_model.load_state_dict(save_dict['model'])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name='t5-small', from_file='seq2seq_models/eli5_t5_model_1024_4.pth', device='cuda:0'
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='wiki_snippets', name='wiki40b_en_100_0')['train']
        wiki40b_passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat',
            dtype='float32',
            mode='r',
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset('eli5', name='LFQA_reddit')
    eli5_train = eli5['train_eli5']
    eli5_train_q_reps = np.memmap(
        'eli5_questions_reps.dat', dtype='float32', mode='r', shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (' <P> '.join(['' for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name='english_wiki40b_snippets_100w',
                n_results=n_results,
            )
    support_list = [
        (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
    ]
    question_doc = 'question: {} context: {}'.format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_sas_generate(
            sas_tokenizer,
            sas_model,
            question_doc,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device='cuda:0',
        )[0]
    return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
header_html = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
header_full = '\n<html>\n  <head>\n    <style>\n      .img-container {\n        padding-left: 90px;\n        padding-right: 90px;\n        padding-top: 50px;\n        padding-bottom: 50px;\n        background-color: #f0f3f9;\n      }\n    </style>\n  </head>\n  <body>\n    <span class="img-container"> <!-- Inline parent element -->\n      %s\n    </span>\n  </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
demo_options = st.sidebar.checkbox('Demo options')
if demo_options:
    action_st = st.sidebar.selectbox(
'',
action_list,
index=3,
)
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
    show_passages = show_type == 'Show full text of passages'
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
    retriever_info = '\n    ### Information retriever options\n\n    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n    '
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
    index_type = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
    wiki_source = 'wiki40b'
    index_type = 'dense'

sampled = 'beam'
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('Generation options')
if generate_options:
    generate_info = '\n    ### Answer generation options\n\n    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n    **beam** search, or **sample** from the decoder\'s output probabilities.\n    '
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
    min_len = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
    max_len = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
        n_beams = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
        top_p = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
        temp = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
        n_beams = None
# start main text
questions_list = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('Enter your question here:', '')
else:
    question = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method='dense', n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method='sparse', n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
            wiki_url = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
            sec_titles = res[1].strip()
if sec_titles == "":
                sections = '[{}]({})'.format(res[0], wiki_url)
else:
                sec_list = sec_titles.split(' & ')
                sections = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
        answers_st = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
"""simple docstring"""
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str
def lowerCamelCase__ ( a ) -> Optional[Any]:
_A: int = list(infix[::-1] ) # reverse the infix equation
for i in range(len(UpperCamelCase__ ) ):
if infix[i] == "(":
_A: Any = ''')''' # change "(" to ")"
elif infix[i] == ")":
_A: List[str] = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(UpperCamelCase__ ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
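# Quick sanity checks (illustrative; both functions also print their
# conversion table as a side effect):
#
#   infix_2_postfix("a+b*c")  # returns "abc*+"
#   infix_2_prefix("a+b*c")   # returns "+a*bc"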
if __name__ == "__main__":
    Infix = input('\nEnter an Infix Equation = ')  # Input an Infix equation
    Infix = ''.join(Infix.split())  # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
| 352
|
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=' ')
        print()
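# Note that ``sudoku`` fills the grid in place. A minimal sketch for keeping an
# unmodified copy of the board around (illustrative only):
#
#   import copy
#   solved = sudoku(copy.deepcopy(initial_grid))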
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 301
| 0
|
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class UpperCAmelCase :
'''simple docstring'''
pass
| 353
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
UpperCAmelCase__ : str = open # noqa: we just need to have a builtin inside this module to test it properly
| 301
| 0
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase__ ( a , a , a , a , a ) -> str:
# Load configuration defined in the metadata file
with open(_lowerCAmelCase ) as metadata_file:
_A: Dict = json.load(_lowerCAmelCase )
_A: List[Any] = LukeConfig(use_entity_aware_attention=_lowerCAmelCase , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_A: List[Any] = torch.load(_lowerCAmelCase , map_location='''cpu''' )["module"]
# Load the entity vocab file
_A: List[Any] = load_original_entity_vocab(_lowerCAmelCase )
# add an entry for [MASK2]
_A: Union[str, Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_A: Tuple = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_A: str = AddedToken('''<ent>''' , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase )
_A: Any = AddedToken('''<ent2>''' , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , '''tokenizer_config.json''' ) , '''r''' ) as f:
_A: Optional[int] = json.load(_lowerCAmelCase )
_A: Dict = "MLukeTokenizer"
with open(os.path.join(_lowerCAmelCase , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
_A: Dict = MLukeTokenizer.from_pretrained(_lowerCAmelCase )
# Initialize the embeddings of the special tokens
_A: List[str] = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
_A: Optional[Any] = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
_A: Any = state_dict["embeddings.word_embeddings.weight"]
_A: str = word_emb[ent_init_index].unsqueeze(0 )
_A: Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
_A: int = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_A: List[str] = state_dict[bias_name]
_A: Optional[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_A: List[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
_A: Any = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_A: Dict = f"""encoder.layer.{layer_index}.attention.self."""
_A: Union[str, Any] = state_dict[prefix + matrix_name]
_A: Optional[int] = state_dict[prefix + matrix_name]
_A: Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_A: List[str] = state_dict["entity_embeddings.entity_embeddings.weight"]
_A: List[Any] = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
_A: int = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_A: List[str] = state_dict["entity_predictions.bias"]
_A: List[Any] = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
_A: str = torch.cat([entity_prediction_bias, entity_mask_bias] )
_A: str = LukeForMaskedLM(config=_lowerCAmelCase ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
_A: str = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
_A: Dict = state_dict[key]
else:
_A: Optional[int] = state_dict[key]
    _A , _A: Optional[int] = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )  # missing_keys, unexpected_keys
if set(_lowerCAmelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(_lowerCAmelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_A: List[Any] = MLukeTokenizer.from_pretrained(_lowerCAmelCase , task='''entity_classification''' )
_A: Optional[Any] = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
_A: int = (0, 9)
_A: Dict = tokenizer(_lowerCAmelCase , entity_spans=[span] , return_tensors='''pt''' )
_A: Any = model(**_lowerCAmelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_A: Dict = torch.Size((1, 33, 7_68) )
_A: List[str] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_A: Union[str, Any] = torch.Size((1, 1, 7_68) )
_A: Any = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_A: Tuple = MLukeTokenizer.from_pretrained(_lowerCAmelCase )
_A: Any = "Tokyo is the capital of <mask>."
_A: List[Any] = (24, 30)
_A: Dict = tokenizer(_lowerCAmelCase , entity_spans=[span] , return_tensors='''pt''' )
_A: Any = model(**_lowerCAmelCase )
_A: Optional[Any] = encoding["input_ids"][0].tolist()
_A: List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
_A: List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(_lowerCAmelCase )
_A: int = outputs.entity_logits[0][0].argmax().item()
_A: Optional[int] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(_lowerCAmelCase ) )
model.save_pretrained(_lowerCAmelCase )
def lowerCamelCase__ ( a ) -> Any:
_A: Optional[Any] = ["[MASK]", "[PAD]", "[UNK]"]
    _A: int = [json.loads(line) for line in open(_lowerCAmelCase )]
_A: str = {}
for entry in data:
_A: str = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_A: str = entity_id
break
_A: Any = f"""{language}:{entity_name}"""
_A: List[Any] = entity_id
return new_mapping
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
UpperCAmelCase__ : List[Any] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
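# Example invocation (illustrative; the file paths are placeholders and the
# script name is whatever this file is saved as):
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base \
#       --model_size base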
| 354
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : List[str]=3_2 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : int=1_0 , lowerCAmelCase_ : Tuple=[1_0, 2_0, 3_0, 4_0] , lowerCAmelCase_ : Optional[Any]=[1, 1, 2, 1] , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : int=3 , lowerCAmelCase_ : List[Any]=None , ):
"""simple docstring"""
_A: str = parent
_A: List[Any] = batch_size
_A: Optional[int] = image_size
_A: Dict = num_channels
_A: str = embeddings_size
_A: Any = hidden_sizes
_A: Dict = depths
_A: Any = is_training
_A: int = use_labels
_A: Tuple = hidden_act
_A: int = num_labels
_A: int = scope
_A: str = len(lowerCAmelCase_ )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A: Union[str, Any] = self.get_config()
return config, pixel_values
def __magic_name__ ( self : str ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __magic_name__ ( self : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str ):
"""simple docstring"""
_A: str = FlaxRegNetModel(config=lowerCAmelCase_ )
_A: Optional[int] = model(lowerCAmelCase_ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __magic_name__ ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
_A: Union[str, Any] = self.num_labels
_A: Union[str, Any] = FlaxRegNetForImageClassification(config=lowerCAmelCase_ )
_A: str = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A: str = self.prepare_config_and_inputs()
_A , _A: Optional[int] = config_and_inputs
_A: Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Tuple = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : int = False
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: int = FlaxRegNetModelTester(self )
_A: Union[str, Any] = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def __magic_name__ ( self : str ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self : int ):
"""simple docstring"""
return
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def __magic_name__ ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
pass
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A , _A: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A: Union[str, Any] = model_class(lowerCAmelCase_ )
_A: Any = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A: Any = [*signature.parameters.keys()]
_A: Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def __magic_name__ ( self : str ):
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple ):
_A: int = model_class(lowerCAmelCase_ )
_A: List[str] = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_A: str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A: Tuple = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 )
_A , _A: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A: Optional[Any] = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A: int = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A , _A: str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A: int = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
_A: Union[str, Any] = model_class(lowerCAmelCase_ )
@jax.jit
def model_jitted(lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : Optional[Any] ):
return model(pixel_values=lowerCAmelCase_ , **lowerCAmelCase_ )
with self.subTest('''JIT Enabled''' ):
_A: str = model_jitted(**lowerCAmelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_A: List[Any] = model_jitted(**lowerCAmelCase_ ).to_tuple()
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( ) -> Tuple:
_A: List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
_A: List[str] = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
_A: str = self.default_image_processor
_A: int = prepare_img()
_A: List[Any] = image_processor(images=lowerCAmelCase_ , return_tensors='''np''' )
_A: str = model(**lowerCAmelCase_ )
# verify the logits
_A: str = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_A: Tuple = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
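# These tests are collected by pytest in a transformers development checkout;
# the file path below is an assumption about where this module lives:
#
#   RUN_SLOW=1 pytest tests/models/regnet/test_modeling_flax_regnet.py -q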
| 301
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : List[str] = ['''pixel_values''']
def __init__( self : Tuple , lowerCAmelCase_ : List[Any] = True , lowerCAmelCase_ : List[Any] = None , lowerCAmelCase_ : Optional[int] = PIL.Image.BICUBIC , lowerCAmelCase_ : int = True , lowerCAmelCase_ : Any = None , lowerCAmelCase_ : Any = 1 / 2_5_5 , lowerCAmelCase_ : str = True , lowerCAmelCase_ : Tuple = True , lowerCAmelCase_ : Union[str, Any] = None , lowerCAmelCase_ : Optional[int] = None , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
_A: List[Any] = size if size is not None else {'''height''': 2_5_6, '''width''': 2_5_6}
_A: Optional[Any] = get_size_dict(lowerCamelCase_ )
_A: Optional[int] = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_A: Union[str, Any] = get_size_dict(lowerCamelCase_ , param_name='''crop_size''' )
_A: Any = do_resize
_A: Union[str, Any] = size
_A: int = resample
_A: Any = do_center_crop
_A: Optional[int] = crop_size
_A: Optional[int] = do_rescale
_A: Tuple = rescale_factor
_A: Dict = do_normalize
_A: Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_A: Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __magic_name__ ( self : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict = PIL.Image.BICUBIC , lowerCAmelCase_ : Union[str, Any] = None , **lowerCAmelCase_ : Optional[int] , ):
"""simple docstring"""
_A: Any = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}""" )
return resize(
lowerCamelCase_ , size=(size['''height'''], size['''width''']) , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int = None , **lowerCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
_A: Tuple = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}""" )
return center_crop(lowerCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int = None , **lowerCAmelCase_ : Tuple , ):
"""simple docstring"""
return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple = None , **lowerCAmelCase_ : Any , ):
"""simple docstring"""
return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple = None , lowerCAmelCase_ : Any = None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : List[str] = None , lowerCAmelCase_ : str = None , lowerCAmelCase_ : Any = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Any = None , lowerCAmelCase_ : List[str] = None , lowerCAmelCase_ : str = None , lowerCAmelCase_ : Tuple = ChannelDimension.FIRST , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
_A: Optional[Any] = do_resize if do_resize is not None else self.do_resize
_A: Optional[int] = resample if resample is not None else self.resample
_A: int = do_center_crop if do_center_crop is not None else self.do_center_crop
_A: int = do_rescale if do_rescale is not None else self.do_rescale
_A: Any = rescale_factor if rescale_factor is not None else self.rescale_factor
_A: Dict = do_normalize if do_normalize is not None else self.do_normalize
_A: str = image_mean if image_mean is not None else self.image_mean
_A: Any = image_std if image_std is not None else self.image_std
_A: Dict = size if size is not None else self.size
_A: Tuple = get_size_dict(lowerCamelCase_ )
_A: List[str] = crop_size if crop_size is not None else self.crop_size
_A: Dict = get_size_dict(lowerCamelCase_ , param_name='''crop_size''' )
_A: Optional[int] = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_A: Optional[int] = [to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
_A: str = [self.resize(image=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ ) for image in images]
if do_center_crop:
_A: Dict = [self.center_crop(image=lowerCamelCase_ , size=lowerCamelCase_ ) for image in images]
if do_rescale:
_A: Any = [self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_ ) for image in images]
if do_normalize:
_A: Union[str, Any] = [self.normalize(image=lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ ) for image in images]
_A: int = [to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_ ) for image in images]
_A: List[str] = {'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ )
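# Minimal usage sketch (illustrative; ``MyImageProcessor`` stands in for
# whatever name this class is exported under, and it is assumed to expose the
# usual ``preprocess`` entry point of a BaseImageProcessor subclass):
#
#   from PIL import Image
#   processor = MyImageProcessor()
#   batch = processor.preprocess(Image.open("cat.png"), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) with the default crop_size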
| 355
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    '''A list whose ordering is defined by its top (last) element.'''
    def __lt__(self, other):
        return self[-1] < other[-1]
    def __eq__(self, other):
        return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
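# Quick example (illustrative):
#   patience_sort([1, 9, 5, 21, 17, 6])  # -> [1, 5, 6, 9, 17, 21]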
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
| 301
| 0
|
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def lowerCamelCase__ ( a ) -> Any:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
_A: int = list(range(2 , n + 1 ) )
_A: int = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(__UpperCAmelCase ) ):
for j in range(i + 1 , len(__UpperCAmelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
_A: int = 0
# filters actual prime numbers.
_A: Optional[Any] = [x for x in begin_list if x != 0]
# precondition
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ), "'ans' must been from type list"
return ans
def lowerCamelCase__ ( a ) -> Dict:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (n > 2), "'N' must been an int and > 2"
_A: Optional[int] = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(__UpperCAmelCase ):
ans.append(__UpperCAmelCase )
# precondition
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ), "'ans' must been from type list"
return ans
def prime_factorization(number: int) -> list:
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def lowerCamelCase__ ( a ) -> Union[str, Any]:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
_A: Optional[int] = 0
# prime factorization of 'number'
_A: int = prime_factorization(__UpperCAmelCase )
_A: int = max(__UpperCAmelCase )
# precondition
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ), "'ans' must been from type int"
return ans
def lowerCamelCase__ ( a ) -> Tuple:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
_A: Tuple = 0
# prime factorization of 'number'
_A: Dict = prime_factorization(__UpperCAmelCase )
_A: Optional[int] = min(__UpperCAmelCase )
# precondition
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ), "'ans' must been from type int"
return ans
def lowerCamelCase__ ( a ) -> Dict:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , __UpperCAmelCase ), "compare must been from type bool"
return number % 2 == 0
def lowerCamelCase__ ( a ) -> int:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , __UpperCAmelCase ), "compare must been from type bool"
return number % 2 != 0
def lowerCamelCase__ ( a ) -> Dict:
assert (
isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (number > 2) and is_even(__UpperCAmelCase )
), "'number' must been an int, even and > 2"
_A: Any = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
_A: Tuple = get_prime_numbers(__UpperCAmelCase )
_A: int = len(__UpperCAmelCase )
# run variable for while-loops.
_A: List[Any] = 0
_A: Tuple = None
# exit variable. for break up the loops
_A: int = True
while i < len_pn and loop:
_A: Tuple = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
_A: Tuple = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(__UpperCAmelCase , __UpperCAmelCase )
and (len(__UpperCAmelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def gcd(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def kg_v(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def lowerCamelCase__ ( a ) -> int:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (n >= 0), "'number' must been a positive int"
_A: int = 0
_A: Dict = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(__UpperCAmelCase ):
ans += 1
# precondition
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and is_prime(
__UpperCAmelCase ), "'ans' must been a prime number and from type int"
return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def lowerCamelCase__ ( a ) -> str:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (n >= 1), "'n' must been int and >= 1"
_A: List[str] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(__UpperCAmelCase )
# precondition
assert ans[0] == 1 and ans[len(__UpperCAmelCase ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def lowerCamelCase__ ( a ) -> Dict:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (
number > 1
), "'number' must been an int and >= 1"
_A: Tuple = get_divisors(__UpperCAmelCase )
# precondition
assert (
isinstance(__UpperCAmelCase , __UpperCAmelCase )
and (divisors[0] == 1)
and (divisors[len(__UpperCAmelCase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def lowerCamelCase__ ( a , a ) -> Optional[int]:
assert (
isinstance(__UpperCAmelCase , __UpperCAmelCase )
and isinstance(__UpperCAmelCase , __UpperCAmelCase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
_A: List[Any] = gcd(abs(__UpperCAmelCase ) , abs(__UpperCAmelCase ) )
# precondition
assert (
isinstance(__UpperCAmelCase , __UpperCAmelCase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCamelCase__ ( a ) -> Optional[int]:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (n >= 0), "'n' must been a int and >= 0"
_A: Optional[Any] = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def lowerCamelCase__ ( a ) -> Tuple:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (n >= 0), "'n' must been an int and >= 0"
_A: Dict = 0
_A: int = 1
_A: int = 1 # this will be return
for _ in range(n - 1 ):
_A: int = ans
ans += fiba
_A: Tuple = tmp
return ans
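# Quick usage sketch for the helpers above that are defined under their
# documented names (illustrative):
#
#   is_prime(97)               # True
#   prime_factorization(360)   # [2, 2, 2, 3, 3, 5]
#   gcd(54, 24)                # 6
#   kg_v(8, 10)                # 40  (least common multiple)
#   get_primes_between(3, 23)  # [5, 7, 11, 13, 17, 19]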
| 356
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
UpperCAmelCase__ : Any = getLogger(__name__)
UpperCAmelCase__ : Optional[Any] = 'cuda' if torch.cuda.is_available() else 'cpu'
def lowerCamelCase__ ( a , a , a , a = 8 , a = DEFAULT_DEVICE , a=False , a="summarization" , a=None , **a , ) -> Dict:
_A: str = Path(a ).open('''w''' , encoding='''utf-8''' )
_A: Optional[Any] = str(a )
_A: Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(a ).to(a )
if fpaa:
_A: Any = model.half()
_A: Optional[int] = AutoTokenizer.from_pretrained(a )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
_A: Any = time.time()
# update config with task specific params
use_task_specific_params(a , a )
if prefix is None:
_A: int = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(a , a ) ) ):
_A: int = [prefix + text for text in examples_chunk]
_A: str = tokenizer(a , return_tensors='''pt''' , truncation=a , padding='''longest''' ).to(a )
_A: str = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **a , )
_A: str = tokenizer.batch_decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
_A: Optional[int] = int(time.time() - start_time ) # seconds
_A: Union[str, Any] = len(a )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def lowerCamelCase__ ( ) -> Tuple:
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def lowerCamelCase__ ( a=True ) -> Optional[Any]:
_A: str = argparse.ArgumentParser()
parser.add_argument('''model_name''' , type=a , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' , type=a , help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' , type=a , help='''where to save summaries''' )
parser.add_argument('''--reference_path''' , type=a , required=a , help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' , type=a , required=a , default='''metrics.json''' , help='''where to save metrics''' )
parser.add_argument('''--device''' , type=a , required=a , default=a , help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
'''--prefix''' , type=a , required=a , default=a , help='''will be added to the begininng of src examples''' )
parser.add_argument('''--task''' , type=a , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=a , default=8 , required=a , help='''batch size''' )
parser.add_argument(
'''--n_obs''' , type=a , default=-1 , required=a , help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' , nargs='''?''' , type=a , const=datetime_now() , help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
_A , _A: Tuple = parser.parse_known_args()
_A: List[str] = parse_numeric_n_bool_cl_kwargs(a )
if parsed_args and verbose:
print(f"""parsed the following generate kwargs: {parsed_args}""" )
_A: int = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
_A: List[str] = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=a )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
_A: Dict = generate_summaries_or_translations(
        a , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fp16 , task=args.task , prefix=args.prefix , **a , )
if args.reference_path is None:
return {}
# Compute scores
_A: Dict = calculate_bleu if '''translation''' in args.task else calculate_rouge
_A: List[Any] = [x.rstrip() for x in open(args.save_path ).readlines()]
_A: Any = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(a )]
_A: dict = score_fn(a , a )
scores.update(a )
if args.dump_args:
scores.update(a )
if args.info:
_A: Optional[Any] = args.info
if verbose:
print(a )
if args.score_path is not None:
json.dump(a , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
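    # Usage for summarization (paths are placeholders):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_generations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization --bs 16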
run_generate(verbose=True)
| 301
| 0
|
def lowerCamelCase__ ( a ) -> "list[int]":
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
_A: Optional[Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
_A: str = 1
if upper_limit > 0:
_A: Optional[Any] = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(lowercase_ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
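# Quick example (illustrative):
#   catalan_numbers(5)  # -> [1, 1, 2, 5, 14, 42]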
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
        N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 357
|
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase__ ( a , a = True , a = math.inf , a = -math.inf , a = math.inf , a = -math.inf , a = False , a = 1_00 , a = 0.01 , a = 1 , ) -> Any:
_A: Optional[Any] = False
_A: Dict = search_prob
_A: str = start_temperate
_A: Optional[int] = []
_A: int = 0
_A: Dict = None
while not search_end:
_A: Dict = current_state.score()
if best_state is None or current_score > best_state.score():
_A: List[Any] = current_state
scores.append(a )
iterations += 1
_A: List[str] = None
_A: str = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_A: Any = random.randint(0 , len(a ) - 1 ) # picking a random neighbor
_A: Union[str, Any] = neighbors.pop(a )
_A: List[str] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_A: Optional[Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_A: str = picked_neighbor
else:
_A: Tuple = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_A: Optional[int] = picked_neighbor
_A: Dict = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_A: Any = True
else:
_A: List[Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(a ) , a )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
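# The acceptance rule above is the Metropolis criterion: a worsening move
# (change < 0) is taken with probability e ** (change / current_temp), so e.g.
# change = -1 is accepted ~99% of the time at temperature 100 but only ~37%
# of the time at temperature 1.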
if __name__ == "__main__":
def lowerCamelCase__ ( a , a ) -> Optional[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
UpperCAmelCase__ : Optional[int] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : Optional[Any] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
UpperCAmelCase__ : Optional[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : List[str] = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase__ ( a , a ) -> Optional[Any]:
return (3 * x**2) - (6 * y)
UpperCAmelCase__ : Union[str, Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : List[str] = simulated_annealing(prob, find_max=False, visualization=True)
print(
'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
F"""{local_min.score()}"""
)
UpperCAmelCase__ : Optional[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : List[Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
F"""{local_min.score()}"""
)
| 301
| 0
|
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
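# Quick example via the mass-action law n * p = n_i**2 (illustrative):
#   carrier_concentration(electron_conc=0, hole_conc=100, intrinsic_conc=10)
#   # -> ('electron_conc', 1.0)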
if __name__ == "__main__":
import doctest
doctest.testmod()
| 358
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[Any] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase__ : Tuple = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase__ : Optional[int] = {'facebook/blenderbot_small-90M': 512}
def lowerCamelCase__ ( a ) -> Optional[Any]:
_A: List[Any] = set()
_A: List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A: List[Any] = char
_A: Union[str, Any] = set(a )
return pairs
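# Quick example (illustrative, assuming the helper's documented name):
#   get_pairs(("h", "e", "l", "l", "o"))
#   # -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}  (set order arbitrary)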
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : str = VOCAB_FILES_NAMES
__UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str]="__start__" , lowerCAmelCase_ : Any="__end__" , lowerCAmelCase_ : Any="__unk__" , lowerCAmelCase_ : Any="__null__" , **lowerCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding='''utf-8''' ) as vocab_handle:
_A: Optional[int] = json.load(lowerCAmelCase_ )
_A: int = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding='''utf-8''' ) as merges_handle:
_A: Dict = merges_handle.read().split('''\n''' )[1:-1]
_A: int = [tuple(merge.split() ) for merge in merges]
_A: Dict = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A: Union[str, Any] = {}
@property
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
return len(self.encoder )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ ( self : str , lowerCAmelCase_ : str ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_A: List[Any] = re.sub('''([.,!?()])''' , R''' \1''' , lowerCAmelCase_ )
_A: List[Any] = re.sub('''(\')''' , R''' \1 ''' , lowerCAmelCase_ )
_A: List[Any] = re.sub(R'''\s{2,}''' , ''' ''' , lowerCAmelCase_ )
if "\n" in token:
_A: Dict = token.replace('''\n''' , ''' __newln__''' )
_A: Any = token.split(''' ''' )
_A: Optional[Any] = []
for token in tokens:
if not len(lowerCAmelCase_ ):
continue
_A: str = token.lower()
_A: List[str] = tuple(lowerCAmelCase_ )
_A: str = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
_A: Dict = get_pairs(lowerCAmelCase_ )
if not pairs:
words.append(lowerCAmelCase_ )
continue
while True:
_A: str = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A: Optional[int] = bigram
_A: str = []
_A: Dict = 0
while i < len(lowerCAmelCase_ ):
try:
_A: List[Any] = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
new_word.extend(word[i:j] )
_A: Optional[int] = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A: Union[str, Any] = tuple(lowerCAmelCase_ )
_A: Tuple = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_A: Optional[int] = get_pairs(lowerCAmelCase_ )
_A: str = '''@@ '''.join(lowerCAmelCase_ )
_A: Tuple = word[:-4]
_A: List[Any] = word
words.append(lowerCAmelCase_ )
return " ".join(lowerCAmelCase_ )
def __magic_name__ ( self : str , lowerCAmelCase_ : str ):
"""simple docstring"""
_A: List[Any] = []
_A: List[Any] = re.findall(R'''\S+\n?''' , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(''' ''' ) ) )
return split_tokens
def __magic_name__ ( self : str , lowerCAmelCase_ : str ):
"""simple docstring"""
_A: List[str] = token.lower()
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self : int , lowerCAmelCase_ : int ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
_A: List[str] = ''' '''.join(lowerCAmelCase_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __magic_name__ ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A: Dict = os.path.join(
lowerCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_A: Any = os.path.join(
lowerCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '''\n''' )
_A: List[str] = 0
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
_A: Optional[int] = token_index
writer.write(''' '''.join(lowerCAmelCase_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
| 301
| 0
|
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCAmelCase__ : Optional[Any] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase__ : Any = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCAmelCase__ : str = transformers.models.auto.configuration_auto.CONFIG_MAPPING
UpperCAmelCase__ : List[str] = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
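# Illustrative semantics of the allowlist above (hypothetical snippet, mirroring the
# lookup done in `check_attribute_being_used` below): a value of `True` whitelists
# every attribute of that config class, while a list whitelists only the named ones.
#
#   allowed = SPECIAL_CASES_TO_ALLOW.get("EncodecConfig", [])
#   allowed is True or "overlap" in allowed  # -> True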
def check_attribute_being_used( config_class , attributes , default_value , source_strings ):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"""config.{attribute}""" in modeling_source
                or f"""getattr(config, \"{attribute}\"""" in modeling_source
                or f"""getattr(self.config, \"{attribute}\"""" in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    Rf"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , modeling_source , )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        '''bos_index''',
        '''eos_index''',
        '''pad_index''',
        '''unk_index''',
        '''mask_index''',
        '''image_size''',
        '''use_cache''',
        '''out_features''',
        '''out_indices''',
    ]
    attributes_used_in_generation = ['''encoder_no_repeat_ngram_size''']
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith('''_token_id''' ):
                case_allowed = True
        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
            case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
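# A standalone sanity check (hypothetical helper, not part of the original script)
# showing that the multi-line `getattr` pattern above matches a wrapped call:
def _demo_getattr_pattern_matches():
    sample = 'x = getattr(\n    self.config, "hidden_size", 768)'
    pattern = R"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"hidden_size\""
    return re.search(pattern, sample) is not None  # True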
def check_config_attributes_being_used( config_class ):
    signature = dict(inspect.signature(config_class.__init__ ).parameters )
    parameter_names = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map ) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class )
    model_dir = os.path.dirname(config_source_file )
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir , fn ) for fn in os.listdir(model_dir ) if fn.startswith('''modeling_''' )]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path ):
            with open(path ) as fp:
                modeling_sources.append(fp.read() )
    unused_attributes = []
    for config_param, default_value in zip(parameter_names , parameter_defaults ):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param] )
        if not check_attribute_being_used(config_class , attributes , default_value , modeling_sources ):
            unused_attributes.append(attributes[0] )
    return sorted(unused_attributes )
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class ) , lambda x : inspect.isclass(x )
                and issubclass(x , PretrainedConfig )
                and inspect.getmodule(x ) == inspect.getmodule(_config_class ) , )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class )
            if len(unused_attributes ) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes ) > 0:
        error = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
        for name, attributes in configs_with_unused_attributes.items():
            error += f"""{name}: {attributes}\n"""
        raise ValueError(error )
if __name__ == "__main__":
check_config_attributes()
| 359
|
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load
    root = Path(__file__ ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
    src_files = [
        root / filename
        for filename in [
            '''vision.cpp''',
            os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
            os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
        ]
    ]
    load(
        '''MultiScaleDeformableAttention''' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
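# Hypothetical usage sketch (not in the original file): callers typically guard the
# JIT compilation so CPU-only environments can fall back to the pure-PyTorch path:
#
#   try:
#       MultiScaleDeformableAttention = load_cuda_kernels()
#   except Exception:
#       MultiScaleDeformableAttention = None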
| 301
| 0
|
UpperCAmelCase__ : str = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator :
    '''simple docstring'''
    def __init__( self : Optional[int] , multiplier : int , increment : int , modulo : int , seed : int = int(time() ) ): # noqa: B008
        """simple docstring"""
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed
    def next_number( self : Any ):
        """simple docstring"""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
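    # Worked example of the recurrence seed' = (multiplier * seed + increment) % modulo,
    # with small hypothetical constants multiplier=5, increment=3, modulo=16, seed=7:
    #   step 1: (5 * 7 + 3) % 16 = 38 % 16 = 6
    #   step 2: (5 * 6 + 3) % 16 = 33 % 16 = 1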
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
| 360
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = ['''image_processor''', '''tokenizer''']
__UpperCamelCase : Optional[Any] = '''BlipImageProcessor'''
__UpperCamelCase : int = ('''BertTokenizer''', '''BertTokenizerFast''')
    def __init__( self : Optional[int] , image_processor : str , tokenizer : Optional[Any] ):
        """simple docstring"""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
def __call__( self : Optional[Any] , lowerCAmelCase_ : ImageInput = None , lowerCAmelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase_ : Union[bool, str, TruncationStrategy] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
return text_encoding
# add pixel_values
        encoding_image_processor = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
if text is not None:
            text_encoding = self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
else:
            text_encoding = None
if text_encoding is not None:
encoding_image_processor.update(lowerCAmelCase_ )
return encoding_image_processor
def __magic_name__ ( self : Optional[Any] , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Tuple ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __magic_name__ ( self : Union[str, Any] , *lowerCAmelCase_ : int , **lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: Dict = self.tokenizer.model_input_names
_A: List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 301
| 0
|
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings( timesteps , embedding_dim , freq_shift = 1 , min_timescale = 1 , max_timescale = 1.0E4 , flip_sin_to_cos = False , scale = 1.0 , ) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even"""
    num_timescales = float(embedding_dim // 2 )
    log_timescale_increment = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales , dtype=jnp.float32 ) * -log_timescale_increment )
    emb = jnp.expand_dims(timesteps , 1 ) * jnp.expand_dims(inv_timescales , 0 )
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time ), jnp.sin(scaled_time )] , axis=1 )
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time ), jnp.cos(scaled_time )] , axis=1 )
    signal = jnp.reshape(signal , [jnp.shape(timesteps )[0], embedding_dim] )
    return signal
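# Hedged usage sketch: with embedding_dim=4 there are two timescales, so each
# timestep maps to [sin(t*f0), sin(t*f1), cos(t*f0), cos(t*f1)]:
#
#   emb = get_sinusoidal_embeddings(jnp.array([0.0, 10.0]), embedding_dim=4)
#   emb.shape  # (2, 4)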
class UpperCAmelCase ( nn.Module ):
'''simple docstring'''
__UpperCamelCase : Dict = 32
    __UpperCamelCase : Optional[int] = jnp.float32
@nn.compact
def __call__( self : Any , lowerCAmelCase_ : str ):
"""simple docstring"""
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(lowerCAmelCase_ )
        temb = nn.silu(temb )
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(temb )
return temb
class UpperCAmelCase ( nn.Module ):
'''simple docstring'''
__UpperCamelCase : Dict = 32
__UpperCamelCase : Tuple = False
__UpperCamelCase : int = 1
@nn.compact
def __call__( self : List[Any] , lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
return get_sinusoidal_embeddings(
            lowerCAmelCase_ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 361
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = '''mobilenet_v1'''
def __init__( self : Optional[int] , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : str=2_2_4 , lowerCAmelCase_ : List[str]=1.0 , lowerCAmelCase_ : Any=8 , lowerCAmelCase_ : Tuple="relu6" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[int]=0.999 , lowerCAmelCase_ : List[str]=0.02 , lowerCAmelCase_ : List[Any]=0.001 , **lowerCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
_A: Any = num_channels
_A: Optional[int] = image_size
_A: Optional[Any] = depth_multiplier
_A: Tuple = min_depth
_A: Any = hidden_act
_A: Dict = tf_padding
_A: List[Any] = classifier_dropout_prob
_A: Tuple = initializer_range
_A: Tuple = layer_norm_eps
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Dict = version.parse('''1.11''' )
@property
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def __magic_name__ ( self : Dict ):
"""simple docstring"""
return 1e-4
| 301
| 0
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class UpperCAmelCase ( lowerCamelCase__ ):
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int=1_3 , lowerCAmelCase_ : Union[str, Any]=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : str=False , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Optional[int]=9_9 , lowerCAmelCase_ : Optional[Any]=3_2 , lowerCAmelCase_ : Optional[int]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=6_4 , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : Tuple=5_1_2 , lowerCAmelCase_ : Optional[Any]=1_6 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : str=0.02 , lowerCAmelCase_ : int=3 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : str=1 , ):
"""simple docstring"""
_A: Any = parent
_A: Any = batch_size
_A: Union[str, Any] = seq_length
_A: Union[str, Any] = is_training
_A: Tuple = use_input_mask
_A: List[Any] = use_token_type_ids
_A: str = use_labels
_A: Optional[int] = vocab_size
_A: Optional[int] = hidden_size
_A: int = num_hidden_layers
_A: Union[str, Any] = num_attention_heads
_A: Optional[Any] = intermediate_size
_A: List[str] = hidden_act
_A: str = hidden_dropout_prob
_A: List[Any] = attention_probs_dropout_prob
_A: List[Any] = max_position_embeddings
_A: Optional[Any] = type_vocab_size
_A: Union[str, Any] = type_sequence_label_size
_A: Any = initializer_range
_A: Optional[int] = num_labels
_A: int = num_choices
_A: int = scope
_A: Tuple = q_groups
_A: Optional[int] = k_groups
_A: int = v_groups
_A: Optional[Any] = post_attention_groups
_A: Optional[int] = intermediate_groups
_A: Dict = output_groups
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A: Union[str, Any] = None
if self.use_input_mask:
_A: Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_A: List[str] = None
_A: List[str] = None
_A: List[str] = None
if self.use_labels:
_A: str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A: Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A: Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
_A: List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: Any = SqueezeBertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_A: Optional[int] = model(snake_case__ , snake_case__ )
_A: Optional[Any] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: Optional[Any] = SqueezeBertForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_A: Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any ):
"""simple docstring"""
_A: List[str] = SqueezeBertForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_A: Union[str, Any] = model(
snake_case__ , attention_mask=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: Union[str, Any] = self.num_labels
_A: List[str] = SqueezeBertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
_A: str = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: List[Any] = self.num_labels
_A: List[str] = SqueezeBertForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_A: Dict = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
_A: List[str] = self.num_choices
_A: Optional[int] = SqueezeBertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_A: List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A: Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A: str = model(
snake_case__ , attention_mask=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self : Any ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__UpperCamelCase : Any = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : int = True
__UpperCamelCase : Any = False
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Union[str, Any] = SqueezeBertModelTester(self )
_A: Optional[Any] = ConfigTester(self , config_class=snake_case__ , dim=3_7 )
def __magic_name__ ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
_A: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*snake_case__ )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*snake_case__ )
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*snake_case__ )
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*snake_case__ )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*snake_case__ )
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*snake_case__ )
@slow
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A: Optional[int] = SqueezeBertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Optional[int] = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
_A: Union[str, Any] = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
_A: Dict = model(snake_case__ )[0]
_A: int = torch.Size((1, 3) )
self.assertEqual(output.shape , snake_case__ )
_A: Optional[int] = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-4 ) )
| 362
|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
UpperCAmelCase__ : Any = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCAmelCase__ : Optional[Any] = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy( saved_model_path , strict , opset ):
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH , '''utils''' , '''tf_ops''' , '''onnx.json''' ) ) as f:
        onnx_opsets = json.load(f )['''opsets''']
    for i in range(1 , opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )
    with open(saved_model_path , '''rb''' ) as f:
        saved_model.ParseFromString(f.read() )
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )
    # Convert to a sorted list
    model_op_names = sorted(model_op_names )
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )
    if strict and len(incompatible_ops ) > 0:
        raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + '''\n'''.join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(f"""Found the following incompatible ops for the opset {opset}:""" )
        print(*incompatible_ops , sep='''\n''' )
    else:
        print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
    '--strict', action='store_true', help='Whether to make the checking strict (raise errors) or not (raise warnings)'
)
UpperCAmelCase__ : int = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
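# Example invocation (hypothetical paths), run from the root of the repo:
#   python utils/check_tf_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict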
| 301
| 0
|
"""simple docstring"""
from __future__ import annotations
def slowsort( sequence , start = None , end = None ) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 363
|
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : int = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
UpperCAmelCase__ : str = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
UpperCAmelCase__ : Dict = {
'ctrl': 256,
}
UpperCAmelCase__ : Any = {
'Pregnancy': 168629,
'Christianity': 7675,
'Explain': 106423,
'Fitness': 63440,
'Saving': 63163,
'Ask': 27171,
'Ass': 95985,
'Joke': 163509,
'Questions': 45622,
'Thoughts': 49605,
'Retail': 52342,
'Feminism': 164338,
'Writing': 11992,
'Atheism': 192263,
'Netflix': 48616,
'Computing': 39639,
'Opinion': 43213,
'Alone': 44967,
'Funny': 58917,
'Gaming': 40358,
'Human': 4088,
'India': 1331,
'Joker': 77138,
'Diet': 36206,
'Legal': 11859,
'Norman': 4939,
'Tip': 72689,
'Weight': 52343,
'Movies': 46273,
'Running': 23425,
'Science': 2090,
'Horror': 37793,
'Confession': 60572,
'Finance': 12250,
'Politics': 16360,
'Scary': 191985,
'Support': 12654,
'Technologies': 32516,
'Teenage': 66160,
'Event': 32769,
'Learned': 67460,
'Notion': 182770,
'Wikipedia': 37583,
'Books': 6665,
'Extract': 76050,
'Confessions': 102701,
'Conspiracy': 75932,
'Links': 63674,
'Narcissus': 150425,
'Relationship': 54766,
'Relationships': 134796,
'Reviews': 41671,
'News': 4256,
'Translation': 26820,
'multilingual': 128406,
}
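# These ids are CTRL's domain "control codes": prompts starting with one of these
# tokens steer generation toward that domain. Illustrative lookup (values as listed
# above): CONTROL_CODES['Links'] -> 63674.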
def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
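# Worked example (hypothetical input): get_pairs(("l", "o", "w", "</w>")) returns
# {("l", "o"), ("o", "w"), ("w", "</w>")} -- every adjacent symbol pair, which is
# what the BPE loop in the tokenizer below ranks and merges.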
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Any = VOCAB_FILES_NAMES
__UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Optional[int] = CONTROL_CODES
    def __init__( self : Dict , vocab_file : Tuple , merges_file : Union[str, Any] , unk_token : Optional[Any]="<unk>" , **kwargs : Optional[int] ):
        """simple docstring"""
        super().__init__(unk_token=unk_token , **kwargs )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            merges = merges_handle.read().split('''\n''' )[1:-1]
        merges = [tuple(merge.split() ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
@property
def __magic_name__ ( self : Any ):
"""simple docstring"""
return len(self.encoder )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self : List[str] , token : Tuple ):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = '''@@ '''.join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
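    # Illustrative walk-through (hypothetical merge ranks): the token "low" first
    # becomes ('l', 'o', 'w</w>'); if only the merge ('l', 'o') is ranked, it
    # collapses to ('lo', 'w</w>'), is joined as "lo@@ w</w>", and the trailing
    # "</w>" is trimmed, giving "lo@@ w".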
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
        split_tokens = []
        words = re.findall(R'''\S+\n?''' , lowerCAmelCase_ )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
return split_tokens
def __magic_name__ ( self : Dict , lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self : Dict , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def __magic_name__ ( self : Any , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
        out_string = ''' '''.join(lowerCAmelCase_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
    def __magic_name__ ( self : List[str] , save_directory : str , filename_prefix : Optional[str] = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 301
| 0
|
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = OpenAIGPTTokenizer
__UpperCamelCase : Dict = OpenAIGPTTokenizerFast
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : str = False
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_A: Dict = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
_A: Union[str, Any] = dict(zip(a__ , range(len(a__ ) ) ) )
_A: Any = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
_A: List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_A: Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(a__ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(a__ ) )
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : Dict ):
"""simple docstring"""
return "lower newer", "lower newer"
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: List[str] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
_A: List[str] = '''lower'''
_A: Any = ['''low''', '''er</w>''']
_A: List[str] = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_A: Any = tokens + ['''<unk>''']
_A: str = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
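    # Why ['low', 'er</w>']: with the merges "l o", "lo w" and "e r</w>" defined in
    # setUp, "lower" reduces as (l,o,w,e,r</w>) -> (lo,w,e,r</w>) -> (low,e,r</w>) -> (low,er</w>).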
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : Optional[int]=1_5 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_A: Any = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_A: Tuple = '''This is a simple input'''
_A: Union[str, Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
_A: str = ('''This is a simple input''', '''This is a pair''')
_A: Any = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding='''max_length''' )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding='''max_length''' )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding='''max_length''' , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding='''max_length''' )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding='''max_length''' )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding='''max_length''' , )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
pass
@require_ftfy
@require_spacy
@require_tokenizers
class UpperCAmelCase ( lowerCAmelCase__ ):
'''simple docstring'''
pass
| 364
|
def solution( n = 10 ) -> str:
    if not isinstance(n , int ) or n < 0:
        raise ValueError('''Invalid input''' )
    modulus = 10**n
    number = 28433 * (pow(2 , 7830457 , modulus )) + 1
    return str(number % modulus )
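# The last `n` digits are taken with modular exponentiation: pow(2, 7830457, modulus)
# keeps every intermediate value below 10**n instead of materializing the roughly
# 2.4-million-digit power 2**7830457 before reducing it.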
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
| 301
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : Any = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class UpperCAmelCase ( __lowerCAmelCase ):
'''simple docstring'''
__UpperCamelCase : List[Any] = '''cvt'''
def __init__( self : Optional[int] , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Dict=[7, 3, 3] , lowerCAmelCase_ : Union[str, Any]=[4, 2, 2] , lowerCAmelCase_ : int=[2, 1, 1] , lowerCAmelCase_ : Tuple=[6_4, 1_9_2, 3_8_4] , lowerCAmelCase_ : List[Any]=[1, 3, 6] , lowerCAmelCase_ : Union[str, Any]=[1, 2, 1_0] , lowerCAmelCase_ : Optional[Any]=[4.0, 4.0, 4.0] , lowerCAmelCase_ : Optional[int]=[0.0, 0.0, 0.0] , lowerCAmelCase_ : Optional[int]=[0.0, 0.0, 0.0] , lowerCAmelCase_ : List[str]=[0.0, 0.0, 0.1] , lowerCAmelCase_ : Any=[True, True, True] , lowerCAmelCase_ : List[Any]=[False, False, True] , lowerCAmelCase_ : Optional[Any]=["dw_bn", "dw_bn", "dw_bn"] , lowerCAmelCase_ : List[str]=[3, 3, 3] , lowerCAmelCase_ : List[Any]=[1, 1, 1] , lowerCAmelCase_ : List[Any]=[2, 2, 2] , lowerCAmelCase_ : List[str]=[1, 1, 1] , lowerCAmelCase_ : Dict=[1, 1, 1] , lowerCAmelCase_ : List[Any]=0.02 , lowerCAmelCase_ : Union[str, Any]=1e-12 , **lowerCAmelCase_ : Tuple , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_A: int = num_channels
_A: List[Any] = patch_sizes
_A: Tuple = patch_stride
_A: Dict = patch_padding
_A: Optional[Any] = embed_dim
_A: Union[str, Any] = num_heads
_A: List[str] = depth
_A: Dict = mlp_ratio
_A: Union[str, Any] = attention_drop_rate
_A: Union[str, Any] = drop_rate
_A: int = drop_path_rate
_A: Any = qkv_bias
_A: List[str] = cls_token
_A: Tuple = qkv_projection_method
_A: Dict = kernel_qkv
_A: List[Any] = padding_kv
_A: Any = stride_kv
_A: Union[str, Any] = padding_q
_A: Optional[int] = stride_q
_A: Tuple = initializer_range
_A: Optional[Any] = layer_norm_eps
| 365
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class UpperCAmelCase :
'''simple docstring'''
__UpperCamelCase : Any = MBartConfig
__UpperCamelCase : Tuple = {}
__UpperCamelCase : Dict = '''gelu'''
def __init__( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any]=1_3 , lowerCAmelCase_ : Dict=7 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Union[str, Any]=9_9 , lowerCAmelCase_ : Dict=3_2 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : int=4 , lowerCAmelCase_ : Union[str, Any]=3_7 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : List[str]=2_0 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : Optional[int]=1 , lowerCAmelCase_ : List[Any]=0 , ):
"""simple docstring"""
_A: Union[str, Any] = parent
_A: List[Any] = batch_size
_A: Dict = seq_length
_A: Dict = is_training
_A: str = use_labels
_A: int = vocab_size
_A: str = hidden_size
_A: Tuple = num_hidden_layers
_A: Optional[Any] = num_attention_heads
_A: Tuple = intermediate_size
_A: int = hidden_dropout_prob
_A: Tuple = attention_probs_dropout_prob
_A: Tuple = max_position_embeddings
_A: Dict = eos_token_id
_A: int = pad_token_id
_A: Any = bos_token_id
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_A: Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_A: List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
_A: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A: int = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_A: Any = prepare_mbart_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return config, inputs_dict
def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
_A: Tuple = TFMBartModel(config=lowerCAmelCase_ ).get_decoder()
_A: List[str] = inputs_dict['''input_ids''']
_A: Tuple = input_ids[:1, :]
_A: List[Any] = inputs_dict['''attention_mask'''][:1, :]
_A: str = inputs_dict['''head_mask''']
_A: Optional[Any] = 1
# first forward pass
_A: Any = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
        output , past_key_values = outputs.to_tuple()
_A: Dict = past_key_values[1]
def prepare_mbart_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> Tuple:
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
__UpperCamelCase : int = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
__UpperCamelCase : Tuple = (
{
'''conversational''': TFMBartForConditionalGeneration,
'''feature-extraction''': TFMBartModel,
'''summarization''': TFMBartForConditionalGeneration,
'''text2text-generation''': TFMBartForConditionalGeneration,
'''translation''': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCamelCase : List[Any] = True
__UpperCamelCase : int = False
__UpperCamelCase : Optional[Any] = False
def __magic_name__ ( self : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int ):
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Dict = TFMBartModelTester(self )
_A: Tuple = ConfigTester(self , config_class=lowerCAmelCase_ )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = [
''' UN Chief Says There Is No Military Solution in Syria''',
]
__UpperCamelCase : List[str] = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
]
__UpperCamelCase : Union[str, Any] = '''facebook/mbart-large-en-ro'''
@cached_property
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __magic_name__ ( self : str ):
"""simple docstring"""
_A: Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __magic_name__ ( self : Union[str, Any] , **lowerCAmelCase_ : Tuple ):
"""simple docstring"""
_A: Optional[Any] = self.translate_src_text(**lowerCAmelCase_ )
self.assertListEqual(self.expected_text , lowerCAmelCase_ )
def __magic_name__ ( self : Dict , **lowerCAmelCase_ : Tuple ):
"""simple docstring"""
_A: Any = self.tokenizer(self.src_text , **lowerCAmelCase_ , return_tensors='''tf''' )
_A: Any = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
_A: Optional[Any] = self.tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
return generated_words
@slow
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 301
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[Any] = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UpperCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
__UpperCamelCase : List[str] = '''unispeech'''
def __init__( self : Optional[int] , lowerCAmelCase_ : Tuple=3_2 , lowerCAmelCase_ : Tuple=7_6_8 , lowerCAmelCase_ : Union[str, Any]=1_2 , lowerCAmelCase_ : Optional[Any]=1_2 , lowerCAmelCase_ : Dict=3_0_7_2 , lowerCAmelCase_ : int="gelu" , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : int=0.02 , lowerCAmelCase_ : List[str]=1e-5 , lowerCAmelCase_ : List[str]="group" , lowerCAmelCase_ : Union[str, Any]="gelu" , lowerCAmelCase_ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowerCAmelCase_ : str=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase_ : List[str]=(1_0, 3, 3, 3, 3, 2, 2) , lowerCAmelCase_ : str=False , lowerCAmelCase_ : str=1_2_8 , lowerCAmelCase_ : Optional[Any]=1_6 , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : str=0.05 , lowerCAmelCase_ : str=1_0 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : int=1_0 , lowerCAmelCase_ : Optional[int]=0 , lowerCAmelCase_ : List[Any]=3_2_0 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : List[Any]=1_0_0 , lowerCAmelCase_ : Optional[Any]=2_5_6 , lowerCAmelCase_ : Optional[int]=2_5_6 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Tuple="mean" , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : int=2_5_6 , lowerCAmelCase_ : Optional[Any]=8_0 , lowerCAmelCase_ : Union[str, Any]=0 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Optional[int]=0.5 , **lowerCAmelCase_ : List[Any] , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ , pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
_A: Union[str, Any] = hidden_size
_A: Optional[Any] = feat_extract_norm
_A: Optional[int] = feat_extract_activation
_A: Dict = list(SCREAMING_SNAKE_CASE_ )
_A: str = list(SCREAMING_SNAKE_CASE_ )
_A: Any = list(SCREAMING_SNAKE_CASE_ )
_A: List[Any] = conv_bias
_A: Tuple = num_conv_pos_embeddings
_A: str = num_conv_pos_embedding_groups
_A: List[Any] = len(self.conv_dim )
_A: Tuple = num_hidden_layers
_A: Optional[int] = intermediate_size
_A: Optional[Any] = hidden_act
_A: str = num_attention_heads
_A: Tuple = hidden_dropout
_A: Dict = attention_dropout
_A: Optional[int] = activation_dropout
_A: List[str] = feat_proj_dropout
_A: int = final_dropout
_A: Dict = layerdrop
_A: Optional[int] = layer_norm_eps
_A: Optional[int] = initializer_range
_A: Optional[int] = num_ctc_classes
_A: Any = vocab_size
_A: int = do_stable_layer_norm
_A: Optional[int] = use_weighted_layer_sum
_A: List[str] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_A: Any = apply_spec_augment
_A: List[Any] = mask_time_prob
_A: Any = mask_time_length
_A: Any = mask_time_min_masks
_A: Union[str, Any] = mask_feature_prob
_A: List[Any] = mask_feature_length
_A: Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_A: int = num_codevectors_per_group
_A: List[Any] = num_codevector_groups
_A: Optional[int] = contrastive_logits_temperature
_A: str = feat_quantizer_dropout
_A: List[str] = num_negatives
_A: Optional[Any] = codevector_dim
_A: Any = proj_codevector_dim
_A: Union[str, Any] = diversity_loss_weight
# ctc loss
_A: Optional[Any] = ctc_loss_reduction
_A: List[Any] = ctc_zero_infinity
# pretraining loss
_A: Tuple = replace_prob
@property
def __magic_name__ ( self : str ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
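    # Worked example: with the default conv_stride (5, 2, 2, 2, 2, 2, 2) the reduction
    # above yields 5 * 2**6 = 320, i.e. one encoder frame per 320 input audio samples.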
| 366
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCAmelCase__ : Tuple = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
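# Note: `_LazyModule` defers the actual submodule imports until an attribute is
# first accessed at runtime; under `TYPE_CHECKING` the real symbols are imported
# above so static type checkers still resolve them.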
| 301
| 0
|
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=DummyObject ):
'''simple docstring'''
__UpperCamelCase : Dict = ['''note_seq''']
def __init__( self : List[Any] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : str ):
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def __magic_name__ ( cls : Any , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def __magic_name__ ( cls : List[str] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
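# This placeholder mirrors the public surface of the real class: every access is
# routed through `requires_backends`, which raises an informative ImportError
# telling the user to install the missing optional `note_seq` dependency.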
| 367
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( SchedulerCommonTest ):
'''simple docstring'''
__UpperCamelCase : Any = (DDPMParallelScheduler,)
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
            '''num_train_timesteps''': 1_0_0_0,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        """simple docstring"""
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self ):
        """simple docstring"""
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        """simple docstring"""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_variance_type( self ):
        """simple docstring"""
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self ):
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_thresholding( self ):
        """simple docstring"""
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_prediction_type( self ):
        """simple docstring"""
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ):
        """simple docstring"""
        for t in [0, 5_0_0, 9_9_9]:
            self.check_over_forward(time_step=t )
    def test_variance( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
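        # The reference values above are the "fixed_small" posterior variances
        # beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t for a
        # linear beta schedule from 1e-4 to 0.02 over 1000 training timesteps.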
    def test_batch_step_no_noise( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3] , dim=0 )
        timesteps = torch.arange(num_trained_timesteps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 1153.1833 ) < 1e-2
        assert abs(result_mean.item() - 0.5005 ) < 1e-3
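        # `batch_step_no_noise` is the "parallel" entry point of this scheduler:
        # it denoises a whole batch of samples at several timesteps in a single
        # call instead of iterating one timestep at a time.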
    def test_full_loop_no_noise( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.9606 ) < 1e-2
        assert abs(result_mean.item() - 0.3372 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 202.0296 ) < 1e-2
        assert abs(result_mean.item() - 0.2631 ) < 1e-3
    def test_custom_timesteps( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )
    def test_custom_timesteps_increasing_order( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 5_1, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=timesteps )
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
    def test_custom_timesteps_too_large( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=F"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
            scheduler.set_timesteps(timesteps=timesteps )
| 301
| 0
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Any = LEDTokenizer
__UpperCamelCase : Optional[int] = LEDTokenizerFast
__UpperCamelCase : Union[str, Any] = True
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
super().setUp()
        vocab_tokens = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab = dict(zip(vocab_tokens , range(len(vocab_tokens ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        return "lower newer", "lower newer"
@cached_property
    def default_tokenizer( self ):
"""simple docstring"""
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
    def default_tokenizer_fast( self ):
"""simple docstring"""
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , max_length=len(expected_src_tokens ) , padding=True , return_tensors='''pt''' )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens , result )
@require_torch
def __magic_name__ ( self : Any ):
"""simple docstring"""
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , padding=True , return_tensors='''pt''' )
            self.assertIn('''input_ids''' , batch )
            self.assertIn('''attention_mask''' , batch )
            self.assertNotIn('''labels''' , batch )
            self.assertNotIn('''decoder_attention_mask''' , batch )
@require_torch
def __magic_name__ ( self : int ):
"""simple docstring"""
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text , max_length=3_2 , padding='''max_length''' , return_tensors='''pt''' )
            self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
@require_torch
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['''I am a small frog''' * 1_0_2_4, '''I am a small frog'''] , padding=True , truncation=True , return_tensors='''pt''' )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def __magic_name__ ( self : Any ):
"""simple docstring"""
        src_text = ['A long paragraph for summarization.']
        tgt_text = [
            'Summary of the text.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text , return_tensors='''pt''' )
            targets = tokenizer(text_target=tgt_text , return_tensors='''pt''' )
            input_ids = inputs['input_ids']
            labels = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __magic_name__ ( self : Dict ):
"""simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ['Summary of the text.', 'Another summary.']
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text , padding=False )
            encoded_output['global_attention_mask'] = [[0] * len(x ) for x in encoded_output['input_ids']]
            outputs = tokenizer.pad(encoded_output )
            self.assertSequenceEqual(outputs['''global_attention_mask'''] , expected_global_attention_mask )
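        # `tokenizer.pad` must also pad the custom `global_attention_mask` key,
        # using -1 as its pad value, as the expected mask above shows; LED
        # consumes this mask alongside the regular attention mask.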
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
pass
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    tokens_r_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 368
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Any = GPTSanJapaneseTokenizer
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : str = {'''do_clean_text''': False, '''add_prefix_space''': False}
def __magic_name__ ( self : Any ):
"""simple docstring"""
super().setUp()
# fmt: off
        vocab_tokens = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
        # fmt: on
        emoji_tokens = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}}  # 😀
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.emoji_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        with open(self.emoji_file , '''w''' ) as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens ) )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
        output_text = '''こんにちは、世界。 \nこんばんは、世界。😀'''
        return input_text, output_text
    def get_clean_sequence( self , tokenizer ):
        """simple docstring"""
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
pass # TODO add if relevant
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
pass # TODO add if relevant
def __magic_name__ ( self : Dict ):
"""simple docstring"""
pass # TODO add if relevant
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = '''こんにちは、世界。 こんばんは、㔺界。'''
        expected_tokens = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , expected_tokens )
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids , expected_ids )
        # Testing conversion to ids with special tokens
        tokens_with_unk = tokens + [tokenizer.unk_token]
        expected_ids_with_unk = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
        ids_with_unk = tokenizer.convert_tokens_to_ids(tokens_with_unk )
        self.assertListEqual(ids_with_unk , expected_ids_with_unk )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
        expected_text = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
        ids = tokenizer.encode(input_text )
        decoded = tokenizer.decode(ids )
        self.assertEqual(decoded , expected_text )
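        # The <|bagoftoken|> special token expands to several copies of the
        # neighbouring token during encoding, which is why the decoded string
        # above contains the repeated "、" characters.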
@slow
def __magic_name__ ( self : Any ):
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
        # Testing tokenization
        prefix_text = '''こんにちは、世界。'''
        input_text = '''こんばんは、㔺界。😀'''
        expected_text = '''こんにちは、世界。こんばんは、世界。😀'''
        ids_1 = tokenizer.encode(prefix_text + input_text )
        ids_2 = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
        ids_3 = tokenizer.encode(input_text , prefix_text=prefix_text )
        decoded_1 = tokenizer.decode(ids_1 )
        decoded_2 = tokenizer.decode(ids_2 )
        decoded_3 = tokenizer.decode(ids_3 )
        self.assertEqual(decoded_1 , expected_text )
        self.assertEqual(decoded_2 , expected_text )
        self.assertEqual(decoded_3 , expected_text )
@slow
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
        # Testing tokenization
        prefix_text = '''こんにちは、世界。'''
        input_text = '''こんばんは、㔺界。😀'''
        len_prefix = len(tokenizer.encode(prefix_text ) ) - 2
        len_text = len(tokenizer.encode(input_text ) ) - 2
        expected_token_types_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_token_types_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_token_types_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        token_type_ids_1 = tokenizer(prefix_text + input_text ).token_type_ids
        token_type_ids_2 = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
        token_type_ids_3 = tokenizer(input_text , prefix_text=prefix_text ).token_type_ids
        self.assertListEqual(token_type_ids_1 , expected_token_types_1 )
        self.assertListEqual(token_type_ids_2 , expected_token_types_2 )
        self.assertListEqual(token_type_ids_3 , expected_token_types_3 )
@slow
def __magic_name__ ( self : Any ):
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
        x_token_1 = tokenizer.encode('''あンいワ''' )
        x_token_2 = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
        x_token_3 = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_2 ) )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_3 ) )
        self.assertNotEqual(x_token_1 , x_token_2 )
        self.assertNotEqual(x_token_1 , x_token_3 )
        self.assertEqual(x_token_2[1] , x_token_1[-1] )  # SEG token
        self.assertEqual(x_token_2[1] , x_token_3[3] )  # SEG token
@slow
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
        input_pairs = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
        x_token = tokenizer(input_pairs , padding=True )
        x_token_2 = tokenizer.batch_encode_plus(input_pairs , padding=True )
        # fmt: off
        input_ids_expected = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
        token_type_ids_expected = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        attention_mask_expected = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , input_ids_expected )
        self.assertListEqual(x_token.token_type_ids , token_type_ids_expected )
        self.assertListEqual(x_token.attention_mask , attention_mask_expected )
        self.assertListEqual(x_token_2.input_ids , input_ids_expected )
        self.assertListEqual(x_token_2.token_type_ids , token_type_ids_expected )
        self.assertListEqual(x_token_2.attention_mask , attention_mask_expected )
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
# tokenizer has no padding token
pass
| 301
| 0
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
UpperCAmelCase__ : Dict = logging.getLogger(__name__)
def load_and_quantize_model( model , bnb_quantization_config , weights_location=None , device_map=None , no_split_module_classes=None , max_memory=None , offload_folder=None , offload_state_dict=False , ) -> List[str]:
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
            '''make sure you have the latest version of `bitsandbytes` installed.''' )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules )
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            '''It is not recommended to quantize a loaded model. '''
            '''The model should be instantiated under the `init_empty_weights` context manager.''' )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    module_name = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
                    param = getattr(model , module_name , None )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(param ):
                param.to(dtype )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info(
            f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            '''We move the model to cuda.''' )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
        load_checkpoint_in_model(
            model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
        return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
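# A minimal usage sketch for the entry point above (the model class and the
# checkpoint path below are hypothetical):
#
#     with init_empty_weights():
#         model = MyModel(my_config)
#     model = load_and_quantize_model(
#         model,
#         bnb_quantization_config=BnbQuantizationConfig(load_in_8bit=True),
#         weights_location="/path/to/checkpoint",
#         device_map="auto",
#     )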
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ) -> int:
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'''''': torch.cuda.current_device()}
        else:
            raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
    if isinstance(device_map , str ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
                '''\'sequential\'.''' )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
            } )
        kwargs = {}
        kwargs['''special_dtypes'''] = special_dtypes
        kwargs['''no_split_module_classes'''] = no_split_module_classes
        kwargs['''dtype'''] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == '''balanced_low_0''') , max_memory=max_memory , **kwargs , )
        kwargs['''max_memory'''] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )
    if isinstance(device_map , dict ):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        '''
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        ''' )
                else:
                    logger.info(
                        '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ) -> Tuple:
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
    if not has_been_replaced:
        logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
            ''' Please double check your model architecture, or submit an issue on github if you think this is'''
            ''' a bug.''' )
    return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ) -> Dict:
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '''.'''.join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def get_keys_to_not_convert( model ) -> List[str]:
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , '''base_model_prefix''' ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '''''' )
        filtered_module_names.append(name )
    return filtered_module_names
def has_4bit_bnb_layers( model ) -> Union[str, Any]:
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device( parameter ) -> int:
    return next(parameter.parameters() ).device
def quantize_and_offload( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ) -> List[Any]:
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('''.''' )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""" )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , '''SCB''' ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , '''meta''' , dtype=new_dtype , value=torch.empty(*param.size() ) )
| 369
|
def solution( max_perimeter = 10**9 ) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
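# The loop above appears to walk successive solutions of the Pell-style
# recurrence behind Project Euler problem 94 ("almost equilateral triangles"),
# accumulating each qualifying triangle's perimeter until the bound is exceeded.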
if __name__ == "__main__":
print(F"""{solution() = }""")
| 301
| 0
|
def solution( max_perimeter = 10**9 ) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
| 370
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_roc_bert'''] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 301
| 0
|
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config( config_path , display=False ):
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan( device , conf_path=None , ckpt_path=None ):
    if conf_path is None:
        conf_path = '''./model_checkpoints/vqgan_only.yaml'''
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = '''./model_checkpoints/vqgan_only.pt'''
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd['''state_dict''']
    model.load_state_dict(sd , strict=True )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan( x , model ):
    z, _, _ = model.encode(x )
    print(f"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str( string , reload=False ):
    module_name, cls = string.rsplit('''.''' , 1 )
    if reload:
        module_imp = importlib.import_module(module_name )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module_name , package=None ) , cls )
def instantiate_from_config( config ):
    if "target" not in config:
        raise KeyError('''Expected key `target` to instantiate.''' )
    return get_obj_from_str(config['''target'''] )(**config.get('''params''' , {} ) )
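# Example: a config mapping such as {"target": "taming.models.vqgan.VQModel",
# "params": {...}} resolves the dotted path to a class and instantiates it with
# the given params.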
def load_model_from_config( config , sd , gpu=True , eval_mode=True ):
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model( config , ckpt , gpu , eval_mode ):
    if ckpt:
        pl_sd = torch.load(ckpt , map_location='''cpu''' )
        global_step = pl_sd['''global_step''']
        print(f"""loaded model from global step {global_step}.""" )
    else:
        pl_sd = {'''state_dict''': None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd['''state_dict'''] , gpu=gpu , eval_mode=eval_mode )['''model''']
    return model, global_step
| 371
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ) -> torch.Tensor:
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
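# betas_for_alpha_bar discretizes a monotonically decreasing alpha_bar(t) into
# per-step betas via beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped
# at max_beta for numerical stability.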
class UpperCAmelCase ( SchedulerMixin , ConfigMixin ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
__UpperCamelCase : Tuple = 2
@register_to_config
    def __init__( self , num_train_timesteps : int = 1_0_0_0 , beta_start : float = 0.00085 , beta_end : float = 0.012 , beta_schedule : str = "linear" , trained_betas : Optional[Union[np.ndarray, List[float]]] = None , prediction_type : str = "epsilon" , use_karras_sigmas : Optional[bool] = False , clip_sample : Optional[bool] = False , clip_sample_range : float = 1.0 , timestep_spacing : str = "linspace" , steps_offset : int = 0 , ):
        """simple docstring"""
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type='''cosine''' )
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type='''exp''' )
        else:
            raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""" )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep( self , timestep , schedule_timesteps=None ):
        """simple docstring"""
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
    def init_noise_sigma( self ):
"""simple docstring"""
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input( self , sample : torch.FloatTensor , timestep : Union[float, torch.FloatTensor] , ):
        """simple docstring"""
        step_index = self.index_for_timestep(timestep )
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps( self , num_inference_steps : int , device : Union[str, torch.device] = None , num_train_timesteps : Optional[int] = None , ):
        """simple docstring"""
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0 , num_train_timesteps - 1 , num_inference_steps , dtype=float )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(float )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps , 0 , -step_ratio )).round().copy().astype(float )
            timesteps -= 1
        else:
            raise ValueError(
                F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        log_sigmas = np.log(sigmas )
        sigmas = np.interp(timesteps , np.arange(0 , len(sigmas ) ) , sigmas )
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas , num_inference_steps=self.num_inference_steps )
            timesteps = np.array([self._sigma_to_t(sigma , log_sigmas ) for sigma in sigmas] )
        sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
        sigmas = torch.from_numpy(sigmas ).to(device=device )
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
        timesteps = torch.from_numpy(timesteps )
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
        if str(device ).startswith('''mps''' ):
            # mps does not support float64
            self.timesteps = timesteps.to(device , dtype=torch.float32 )
        else:
            self.timesteps = timesteps.to(device=device )
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int )
    def _sigma_to_t( self , sigma , log_sigmas ):
        """simple docstring"""
        # get log sigma
        log_sigma = np.log(sigma )
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w , 0 , 1 )
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape )
        return t
    def _convert_to_karras( self , in_sigmas : torch.FloatTensor , num_inference_steps ):
        """simple docstring"""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0 , 1 , num_inference_steps )
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
@property
    def state_in_first_order( self ):
        """simple docstring"""
        return self.dt is None
    def step( self , model_output : Union[torch.FloatTensor, np.ndarray] , timestep : Union[float, torch.FloatTensor] , sample : Union[torch.FloatTensor, np.ndarray] , return_dict : bool = True , ):
        """simple docstring"""
        step_index = self.index_for_timestep(timestep )
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
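    # Heun's method alternates between a first-order Euler step (which caches the
    # derivative and `dt`) and a second-order correction step that averages the
    # two derivatives; the `state_in_first_order` property tracks the phase.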
    def add_noise( self , original_samples : torch.FloatTensor , noise : torch.FloatTensor , timesteps : torch.FloatTensor , ):
        """simple docstring"""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            timesteps = timesteps.to(original_samples.device , dtype=torch.float32 )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )
        step_indices = [self.index_for_timestep(t , schedule_timesteps ) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self : Dict ):
"""simple docstring"""
return self.config.num_train_timesteps
| 301
| 0
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=1_3 , image_size=3_2 , patch_size=2 , num_channels=3 , embed_dim=1_6 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=1_0 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        """simple docstring"""
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
        # verify ValueError
        with self.parent.assertRaises(ValueError ):
            config.out_features = ['stem']
            model = MaskFormerSwinBackbone(config=config )
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Dict = self.prepare_config_and_inputs()
_A: Union[str, Any] = config_and_inputs
_A: str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_retain_grad_hidden_states_attentions(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (list, tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 350
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
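# A minimal usage sketch (the column names "file"/"text" are hypothetical;
# the Features/Audio/Value API is the public `datasets` one):
#
#   features = Features({"file": Audio(), "text": Value("string")})
#   template = AutomaticSpeechRecognition(audio_column="file", transcription_column="text")
#   aligned = template.align_with_features(features)
#   assert aligned.column_mapping == {"file": "audio", "text": "transcription"}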
| 301
| 0
|
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
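# Sketch of ad-hoc usage outside the test harness (downloads the checkpoint):
#
#   from transformers import BarthezTokenizer
#   tok = BarthezTokenizer.from_pretrained("moussaKam/mbarthez")
#   ids = tok("Le transformeur est un modèle d'apprentissage profond.").input_ids
#   print(tok.convert_ids_to_tokens(ids))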
| 351
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_sas_generate(
            sas_tokenizer,
            sas_model,
            question_doc,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
            question_doc, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            question_doc, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
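# The app is meant to be launched with Streamlit, e.g. `streamlit run eli5_app.py`
# (the file name is assumed). The retrieval + generation pipeline can also be
# driven programmatically once the models and indexes above are loaded, e.g.:
#
#   question_doc, support_list = make_support("How do people make chocolate?", source="wiki40b", method="dense")
#   answer, _ = answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256)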
| 301
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
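# Example invocation (script name and output path are placeholders); the
# converted folder can then be loaded back with the regular from_pretrained API:
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224
#
#   from transformers import DeiTForImageClassificationWithTeacher
#   model = DeiTForImageClassificationWithTeacher.from_pretrained("./deit-base-distilled-patch16-224")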
| 352
|
from __future__ import annotations
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
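# Note that the solver mutates the grid it is given, so pass a copy when the
# original must be preserved. A quick sanity check:
#
#   solved = sudoku([row[:] for row in initial_grid])
#   assert solved is not None and all(0 not in row for row in solved)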
| 301
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
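# A small sketch of instantiating the config, e.g. for a scaled-down model:
#
#   config = OpenAIGPTConfig(n_embd=256, n_layer=4, n_head=4)
#   assert config.hidden_size == 256  # resolved through attribute_map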
| 353
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
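# Sketch of how test_patching.py exercises this module; the exact helper
# location/signature is assumed from the `datasets` test suite:
#
#   from datasets.utils.patching import patch_submodule
#   import _test_patching
#
#   with patch_submodule(_test_patching, "os.path.join", lambda *p: "/".join(p)):
#       ...  # the os.path.join reference inside this module is temporarily replaced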
| 301
| 0
|
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")

    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")

    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
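# Typical release flow (a sketch; run from the repository root, script path assumed):
#
#   python utils/release.py                  # bump to the next minor release version
#   python utils/release.py --patch          # cut a patch release instead
#   python utils/release.py --post_release   # move the branch back to a .dev0 version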
| 354
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)

        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 301
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
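# Minimal sketch: a smaller-than-default configuration for quick experiments.
#
#   config = LxmertConfig(l_layers=3, x_layers=2, r_layers=2)
#   assert config.num_hidden_layers == {"vision": 2, "cross_encoder": 2, "language": 3}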
| 355
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
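# --- Added walk-through (not in the original file), assuming the
# implementation above: each element is appended to the leftmost pile whose
# top is >= the element, so piles are non-increasing from bottom to top. For
# [5, 1, 4, 2, 3] the piles become [5, 1], [4, 2], [3]; reversing each pile
# gives the ascending runs [1, 5], [2, 4], [3], which heapq.merge interleaves
# into [1, 2, 3, 4, 5].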
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
def prefix_function(input_string: str) -> list:
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    return max(prefix_function(input_str))
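# --- Added walk-through (not in the original file), assuming the functions
# above: prefix_function("aabaaab") returns [0, 1, 0, 1, 2, 2, 3]; the final 3
# records that the proper prefix "aab" is also a suffix of "aabaaab", so
# longest_prefix("aabaaab") == 3.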
if __name__ == "__main__":
import doctest
doctest.testmod()
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
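# --- Added note (not in the original file): the acceptance rule above is the
# standard Metropolis criterion. A worsening move with change = -2.0 at
# temperature 100 is accepted with probability e**(-2/100) ~ 0.98, while the
# same move at temperature 1 survives only with probability e**(-2) ~ 0.135,
# which is why the search settles down as the temperature decays.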
if __name__ == "__main__":
def lowerCamelCase__ ( a , a ) -> Optional[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
UpperCAmelCase__ : Optional[int] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : Optional[Any] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
UpperCAmelCase__ : Optional[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : List[str] = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase__ ( a , a ) -> Optional[Any]:
return (3 * x**2) - (6 * y)
UpperCAmelCase__ : Union[str, Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : List[str] = simulated_annealing(prob, find_max=False, visualization=True)
print(
'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
F"""{local_min.score()}"""
)
UpperCAmelCase__ : Optional[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : List[Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
F"""{local_min.score()}"""
)
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    # NOTE: the "vit.encoder.layer..." target key names below are an assumption,
    # chosen to match the attention module layout used in convert_state_dict.
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
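# --- Added note (not in the original file): the fused timm qkv matrix has
# shape (3 * hidden_size, hidden_size), so the three equal slices above are
# the query, key and value projections in that order; e.g. with
# hidden_size = 384 (yolos_s) each slice is (384, 384).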
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
) -> None:
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
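# --- Added example (not in the original file), assuming get_pairs above:
# get_pairs(("l", "o", "w", "er</w>")) returns
# {("l", "o"), ("o", "w"), ("w", "er</w>")}.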
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)

        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowerCamelCase__ ( a="" ) -> int:
_A: List[Any] = tempfile.mkdtemp()
return os.path.join(lowerCAmelCase__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
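# --- Added usage sketch (not in the original file): the loader is typically
# invoked lazily the first time the custom kernel is needed; catching failures
# is worthwhile since compilation requires a CUDA toolchain.
# try:
#     MSDA = load_cuda_kernels()
# except Exception as e:
#     print(f"Could not load custom kernel, falling back to PyTorch ops: {e}")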
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
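# --- Added walk-through (not in the original file): 47 + 74 = 121 is a
# palindrome after one iteration, so 47 is not Lychrel; 196 never reaches a
# palindrome within the 50-iteration cap used above, so it is counted.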
if __name__ == "__main__":
print(F"""{solution() = }""")
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
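# --- Added usage sketch (not in the original file): the checkpoint name below
# is an illustrative placeholder; any checkpoint shipping this processor
# layout should behave the same way.
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=image, text="a photography of", return_tensors="pt")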
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
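# --- Added usage sketch (not in the original file): wiring the two classes
# together for ONNX export metadata. The constructor signature is assumed
# from the base OnnxConfig class.
if __name__ == "__main__":
    config = MobileNetV1Config(depth_multiplier=1.0)
    onnx_config = MobileNetV1OnnxConfig(config, task="image-classification")
    print(onnx_config.inputs)   # OrderedDict([('pixel_values', {0: 'batch'})])
    print(onnx_config.outputs)  # OrderedDict([('logits', {0: 'batch'})])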
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
_A: Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
_A: Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_A: int = image_processing(__a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A: int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
_A: Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_A: Union[str, Any] = image_processing(__a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
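# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test class above): a minimal NumPy
# center-crop helper showing why every encoded batch checked above has shape
# (batch_size, num_channels, crop_size["height"], crop_size["width"]). The
# helper name and the toy sizes are hypothetical.
#
#   import numpy as np
#
#   def center_crop(image: np.ndarray, crop_size: dict) -> np.ndarray:
#       """Crop a (channels, height, width) array to the requested size."""
#       _, height, width = image.shape
#       top = (height - crop_size["height"]) // 2
#       left = (width - crop_size["width"]) // 2
#       return image[:, top : top + crop_size["height"], left : left + crop_size["width"]]
#
#   image = np.zeros((3, 30, 30))
#   assert center_crop(image, {"height": 18, "width": 18}).shape == (3, 18, 18)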
| 362
|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path , strict , opset ):
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH , '''utils''' , '''tf_ops''' , '''onnx.json''' ) ) as f:
        onnx_opsets = json.load(f )['''opsets''']
    for i in range(1 , opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )
    with open(saved_model_path , '''rb''' ) as f:
        saved_model.ParseFromString(f.read() )
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )
    # Convert to a sorted list for a stable report
    model_op_names = sorted(model_op_names )
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )
    if strict and len(incompatible_ops ) > 0:
        raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + '''\n'''.join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(f"""Found the following incompatible ops for the opset {opset}:""" )
        print(*incompatible_ops , sep='''\n''' )
    else:
        print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
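# Example invocation (script and model paths are hypothetical; run from the
# repo root so utils/tf_ops/onnx.json resolves):
#
#   python utils/check_tf_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict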
| 301
| 0
|
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( ) -> int:
_A: Optional[int] = 10
_A: Union[str, Any] = datasets.Features(
{
'''tokens''': datasets.Sequence(datasets.Value('''string''' ) ),
'''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ),
'''answers''': datasets.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
'''id''': datasets.Value('''int64''' ),
} )
_A: Union[str, Any] = datasets.Dataset.from_dict(
{
'''tokens''': [['''foo'''] * 5] * n,
'''labels''': [[1] * 5] * n,
'''answers''': [{'''answer_start''': [97], '''text''': ['''1976''']}] * 10,
'''id''': list(range(UpperCAmelCase__ ) ),
} , features=UpperCAmelCase__ , )
return dataset
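# For orientation (hedged sketch, not a fixture): with the features declared
# above, a single row of this dataset looks like
#
#   {"tokens": ["foo", "foo", "foo", "foo", "foo"],
#    "labels": [1, 1, 1, 1, 1],
#    "answers": {"text": ["1976"], "answer_start": [97]},
#    "id": 0}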
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a ) -> Union[str, Any]:
_A: Any = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' )
dataset.map(cache_file_name=UpperCAmelCase__ )
return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n    Text data.\n    Second line of data.'
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> str:
_A: Dict = tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
_A: List[str] = FILE_CONTENT
with open(UpperCAmelCase__ , '''w''' ) as f:
f.write(UpperCAmelCase__ )
return filename
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> Any:
    import bz2
_A: Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2'''
_A: List[Any] = bytes(UpperCAmelCase__ , '''utf-8''' )
    with bz2.open(UpperCAmelCase__ , '''wb''' ) as f:
f.write(UpperCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> Optional[int]:
import gzip
_A: List[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
_A: Tuple = bytes(UpperCAmelCase__ , '''utf-8''' )
with gzip.open(UpperCAmelCase__ , '''wb''' ) as f:
f.write(UpperCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> List[str]:
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
_A: List[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4'''
_A: List[Any] = bytes(UpperCAmelCase__ , '''utf-8''' )
        with lz4.frame.open(UpperCAmelCase__ , '''wb''' ) as f:
f.write(UpperCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a ) -> int:
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
_A: Dict = tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z'''
        with py7zr.SevenZipFile(UpperCAmelCase__ , '''w''' ) as archive:
archive.write(UpperCAmelCase__ , arcname=os.path.basename(UpperCAmelCase__ ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a ) -> List[Any]:
import tarfile
_A: Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
with tarfile.TarFile(UpperCAmelCase__ , '''w''' ) as f:
f.add(UpperCAmelCase__ , arcname=os.path.basename(UpperCAmelCase__ ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> Any:
import lzma
_A: List[str] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
_A: Optional[int] = bytes(UpperCAmelCase__ , '''utf-8''' )
with lzma.open(UpperCAmelCase__ , '''wb''' ) as f:
f.write(UpperCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a ) -> int:
import zipfile
_A: List[str] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
with zipfile.ZipFile(UpperCAmelCase__ , '''w''' ) as f:
f.write(UpperCAmelCase__ , arcname=os.path.basename(UpperCAmelCase__ ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> Any:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_A: List[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
_A: Optional[Any] = bytes(UpperCAmelCase__ , '''utf-8''' )
with zstd.open(UpperCAmelCase__ , '''wb''' ) as f:
f.write(UpperCAmelCase__ )
return path
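# The byte-stream compression fixtures above (bz2, gzip, lz4, xz, zstd) all
# follow one pattern: encode FILE_CONTENT as UTF-8 bytes and write them through
# the codec's file-like API. A hedged generic sketch of that pattern, where
# codec_open stands in for bz2.open / gzip.open / lz4.frame.open / lzma.open /
# zstd.open (helper name hypothetical):
#
#   def make_compressed_file(codec_open, path, text=FILE_CONTENT):
#       data = bytes(text, "utf-8")
#       with codec_open(path, "wb") as f:
#           f.write(data)
#       return path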
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> Any:
_A: Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''file.xml'''
_A: Optional[Any] = textwrap.dedent(
'''\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>''' )
with open(UpperCAmelCase__ , '''w''' ) as f:
f.write(UpperCAmelCase__ )
return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> Tuple:
_A: List[str] = datasets.Dataset.from_dict(UpperCAmelCase__ )
_A: Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' )
dataset.map(cache_file_name=UpperCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> int:
_A: str = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.sqlite''' )
    with contextlib.closing(sqlite3.connect(UpperCAmelCase__ ) ) as con:
_A: str = con.cursor()
cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''' )
for item in DATA:
cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> Any:
_A: int = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' )
with open(UpperCAmelCase__ , '''w''' , newline='''''' ) as f:
_A: Tuple = csv.DictWriter(UpperCAmelCase__ , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> str:
_A: Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' )
with open(UpperCAmelCase__ , '''w''' , newline='''''' ) as f:
_A: List[Any] = csv.DictWriter(UpperCAmelCase__ , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a ) -> Tuple:
    import bz2
_A: List[str] = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.bz2'''
with open(UpperCAmelCase__ , '''rb''' ) as f:
_A: List[Any] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(UpperCAmelCase__ , '''wb''' ) as f:
f.write(UpperCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a , a ) -> List[Any]:
_A: Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(UpperCAmelCase__ , '''w''' ) as f:
f.write(UpperCAmelCase__ , arcname=os.path.basename(UpperCAmelCase__ ) )
f.write(UpperCAmelCase__ , arcname=os.path.basename(UpperCAmelCase__ ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a , a ) -> Dict:
_A: str = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(UpperCAmelCase__ , '''w''' ) as f:
f.write(UpperCAmelCase__ , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) )
f.write(UpperCAmelCase__ , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''' ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a , a ) -> Optional[int]:
_A: List[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip'''
with zipfile.ZipFile(UpperCAmelCase__ , '''w''' ) as f:
f.write(UpperCAmelCase__ , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCAmelCase__ ) ) )
f.write(UpperCAmelCase__ , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCAmelCase__ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> Union[str, Any]:
_A: Any = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' )
_A: Optional[int] = pa.schema(
{
'''col_1''': pa.string(),
            '''col_2''': pa.int64(),
            '''col_3''': pa.float64(),
} )
with open(UpperCAmelCase__ , '''wb''' ) as f:
_A: Any = pq.ParquetWriter(UpperCAmelCase__ , schema=UpperCAmelCase__ )
_A: Optional[Any] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(UpperCAmelCase__ ) )] for k in DATA[0]} , schema=UpperCAmelCase__ )
writer.write_table(UpperCAmelCase__ )
writer.close()
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> str:
_A: Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
_A: str = {'''data''': DATA}
with open(UpperCAmelCase__ , '''w''' ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> Dict:
_A: str = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
_A: Optional[int] = {'''data''': DATA_DICT_OF_LISTS}
with open(UpperCAmelCase__ , '''w''' ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> Optional[int]:
_A: Union[str, Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' )
with open(UpperCAmelCase__ , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(UpperCAmelCase__ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> int:
_A: List[str] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' )
with open(UpperCAmelCase__ , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(UpperCAmelCase__ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> Union[str, Any]:
_A: List[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' )
with open(UpperCAmelCase__ , '''w''' ) as f:
for item in DATA_312:
f.write(json.dumps(UpperCAmelCase__ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> List[Any]:
_A: Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' )
with open(UpperCAmelCase__ , '''w''' ) as f:
for item in DATA_STR:
f.write(json.dumps(UpperCAmelCase__ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a ) -> List[Any]:
import gzip
_A: Optional[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' )
with open(UpperCAmelCase__ , '''rb''' ) as orig_file:
with gzip.open(UpperCAmelCase__ , '''wb''' ) as zipped_file:
zipped_file.writelines(UpperCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a ) -> Dict:
import gzip
_A: str = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' )
with open(UpperCAmelCase__ , '''rb''' ) as orig_file:
with gzip.open(UpperCAmelCase__ , '''wb''' ) as zipped_file:
zipped_file.writelines(UpperCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a , a ) -> Optional[Any]:
_A: List[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip'''
with zipfile.ZipFile(UpperCAmelCase__ , '''w''' ) as f:
f.write(UpperCAmelCase__ , arcname=os.path.basename(UpperCAmelCase__ ) )
f.write(UpperCAmelCase__ , arcname=os.path.basename(UpperCAmelCase__ ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a , a , a ) -> Dict:
_A: Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip'''
with zipfile.ZipFile(UpperCAmelCase__ , '''w''' ) as f:
f.write(UpperCAmelCase__ , arcname=os.path.join('''nested''' , os.path.basename(UpperCAmelCase__ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a , a ) -> Any:
_A: int = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip'''
with zipfile.ZipFile(UpperCAmelCase__ , '''w''' ) as f:
f.write(UpperCAmelCase__ , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCAmelCase__ ) ) )
f.write(UpperCAmelCase__ , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCAmelCase__ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a , a ) -> Tuple:
_A: Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar'''
with tarfile.TarFile(UpperCAmelCase__ , '''w''' ) as f:
f.add(UpperCAmelCase__ , arcname=os.path.basename(UpperCAmelCase__ ) )
f.add(UpperCAmelCase__ , arcname=os.path.basename(UpperCAmelCase__ ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a , a , a ) -> Dict:
_A: List[str] = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar'''
with tarfile.TarFile(UpperCAmelCase__ , '''w''' ) as f:
f.add(UpperCAmelCase__ , arcname=os.path.join('''nested''' , os.path.basename(UpperCAmelCase__ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> Optional[Any]:
_A: Union[str, Any] = ['''0''', '''1''', '''2''', '''3''']
_A: Union[str, Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' )
with open(UpperCAmelCase__ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> int:
_A: List[str] = ['''0''', '''1''', '''2''', '''3''']
_A: Optional[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' )
with open(UpperCAmelCase__ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> str:
_A: Optional[Any] = ['''0''', '''1''', '''2''', '''3''']
_A: Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset.abc'''
with open(UpperCAmelCase__ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a , a ) -> List[str]:
_A: Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip'''
with zipfile.ZipFile(UpperCAmelCase__ , '''w''' ) as f:
f.write(UpperCAmelCase__ , arcname=os.path.basename(UpperCAmelCase__ ) )
f.write(UpperCAmelCase__ , arcname=os.path.basename(UpperCAmelCase__ ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a , a ) -> Tuple:
_A: int = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip'''
with zipfile.ZipFile(UpperCAmelCase__ , '''w''' ) as f:
f.write(UpperCAmelCase__ , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCAmelCase__ ) ) )
f.write(UpperCAmelCase__ , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCAmelCase__ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a , a ) -> Optional[int]:
_A: List[str] = tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip'''
with zipfile.ZipFile(UpperCAmelCase__ , '''w''' ) as f:
f.write(UpperCAmelCase__ , arcname=os.path.basename('''unsupported.ext''' ) )
f.write(UpperCAmelCase__ , arcname=os.path.basename('''unsupported_2.ext''' ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> str:
_A: Any = '''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] )
_A: int = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' )
with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(UpperCAmelCase__ )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( ) -> int:
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''' )
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( ) -> Optional[int]:
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''' )
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a , a ) -> Optional[int]:
_A: Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip'''
with zipfile.ZipFile(UpperCAmelCase__ , '''w''' ) as f:
f.write(UpperCAmelCase__ , arcname=os.path.basename(UpperCAmelCase__ ) )
f.write(UpperCAmelCase__ , arcname=os.path.basename(UpperCAmelCase__ ).replace('''.jpg''' , '''2.jpg''' ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( a ) -> Dict:
_A: Union[str, Any] = tmp_path_factory.mktemp('''data_dir''' )
(data_dir / "subdir").mkdir()
with open(data_dir / '''subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden file
with open(data_dir / '''subdir''' / '''.test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '''.subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''.subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
return data_dir
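# Hedged usage sketch (test name hypothetical): a test consuming the directory
# fixture above would rely on datasets skipping the hidden file and the hidden
# .subdir when resolving data files, e.g.
#
#   def test_data_dir_skips_hidden_entries(data_dir):
#       ds = datasets.load_dataset("text", data_dir=str(data_dir))
#       assert sorted(ds.keys()) == ["test", "train"]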
| 363
|
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'ctrl': 256,
}
CONTROL_CODES = {
'Pregnancy': 168629,
'Christianity': 7675,
'Explain': 106423,
'Fitness': 63440,
'Saving': 63163,
'Ask': 27171,
'Ass': 95985,
'Joke': 163509,
'Questions': 45622,
'Thoughts': 49605,
'Retail': 52342,
'Feminism': 164338,
'Writing': 11992,
'Atheism': 192263,
'Netflix': 48616,
'Computing': 39639,
'Opinion': 43213,
'Alone': 44967,
'Funny': 58917,
'Gaming': 40358,
'Human': 4088,
'India': 1331,
'Joker': 77138,
'Diet': 36206,
'Legal': 11859,
'Norman': 4939,
'Tip': 72689,
'Weight': 52343,
'Movies': 46273,
'Running': 23425,
'Science': 2090,
'Horror': 37793,
'Confession': 60572,
'Finance': 12250,
'Politics': 16360,
'Scary': 191985,
'Support': 12654,
'Technologies': 32516,
'Teenage': 66160,
'Event': 32769,
'Learned': 67460,
'Notion': 182770,
'Wikipedia': 37583,
'Books': 6665,
'Extract': 76050,
'Confessions': 102701,
'Conspiracy': 75932,
'Links': 63674,
'Narcissus': 150425,
'Relationship': 54766,
'Relationships': 134796,
'Reviews': 41671,
'News': 4256,
'Translation': 26820,
'multilingual': 128406,
}
def get_pairs(word ):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
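# Quick sanity example (illustrative only):
#   get_pairs(("l", "o", "w", "</w>")) == {("l", "o"), ("o", "w"), ("w", "</w>")}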
class CTRLTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__( self , vocab_file , merges_file , unk_token="<unk>" , **kwargs ):
        """simple docstring"""
        super().__init__(unk_token=unk_token , **kwargs )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            merges = merges_handle.read().split('''\n''' )[1:-1]
        merges = [tuple(merge.split() ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.encoder )
    def get_vocab( self ):
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = '''@@ '''.join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        """simple docstring"""
        split_tokens = []
        words = re.findall(R'''\S+\n?''' , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
        return split_tokens
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = ''' '''.join(tokens ).replace('''@@ ''' , '''''' ).strip()
        return out_string
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens , token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file , merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
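# Hedged walk-through of the BPE merge loop above, with toy merge ranks rather
# than the real CTRL merges file. Given bpe_ranks = {("h", "e"): 0, ("l", "l"): 1},
# bpe("hello") proceeds as
#
#   ("h", "e", "l", "l", "o</w>") -> ("he", "l", "l", "o</w>")   # merge ("h", "e")
#                                 -> ("he", "ll", "o</w>")       # merge ("l", "l")
#
# then joins with "@@ " to get "he@@ ll@@ o</w>" and strips the trailing
# "</w>" marker (the word[:-4] step), returning "he@@ ll@@ o".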
| 301
| 0
|
__version__ = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
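# Minimal standalone sketch of the optional-dependency pattern used throughout
# this __init__ (an assumed simplification, not the actual diffusers utilities):
# probe whether a backend is importable, and expose a stub that fails loudly
# on first use when it is not.

import importlib.util


def _backend_available(pkg: str) -> bool:
    # True when the package can be imported in this environment
    return importlib.util.find_spec(pkg) is not None


class _OptionalDependencyStub:
    """Stands in for a class whose backend is missing; raises on instantiation."""

    def __init__(self, *args, **kwargs):
        raise ImportError("Install the missing optional backend to use this class.")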
| 364
|
def solution(n: int = 10 ) -> str:
    """Return the last ``n`` digits of 28433 * 2**7830457 + 1 (Project Euler 97)."""
    if not isinstance(n , int ) or n < 0:
        raise ValueError('''Invalid input''' )
    modulus = 10**n
    number = 28433 * pow(2 , 7830457 , modulus ) + 1
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
| 301
| 0
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path , tgt_path , save_path=None , **kwargs ):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns , tgt_lns , **kwargs )
    if save_path is not None:
        save_json(metrics , save_path )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
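# Example CLI call via python-fire (script and file names hypothetical):
#
#   python rouge_cli.py predicted_summaries.txt gold_summaries.txt --save_path rouge.json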
| 365
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester :
'''simple docstring'''
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=2_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_mbart_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        """simple docstring"""
        model = TFMBartModel(config=config ).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFMBartForConditionalGeneration,
            '''feature-extraction''': TFMBartModel,
            '''summarization''': TFMBartForConditionalGeneration,
            '''text2text-generation''': TFMBartForConditionalGeneration,
            '''translation''': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFMBartModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MBartConfig )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''

    src_text = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
    ]
    expected_text = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
    ]
    model_name = '''facebook/mbart-large-en-ro'''
    @cached_property
    def tokenizer( self ):
        """simple docstring"""
        return AutoTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model( self ):
        """simple docstring"""
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected( self , **tokenizer_kwargs ):
        """simple docstring"""
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        self.assertListEqual(self.expected_text , generated_words )
    def translate_src_text( self , **tokenizer_kwargs ):
        """simple docstring"""
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , return_tensors='''tf''' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        generated_words = self.tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
        return generated_words
    @slow
    def test_batch_generation_en_ro( self ):
        """simple docstring"""
        self._assert_generated_batch_equal_expected()
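# Hedged sketch of what the slow test above exercises end to end (network
# access and TF weights required):
#
#   tok = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
#   model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
#   batch = tok([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
#   ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
#   print(tok.batch_decode(ids, skip_special_tokens=True))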
| 301
| 0
|
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar('_T')
class QueueByTwoStacks( Generic[_T] ):
    '''simple docstring'''

    def __init__( self , iterable: Iterable[_T] | None = None ):
        """simple docstring"""
        self._stack1: list[_T] = list(iterable or [] )
        self._stack2: list[_T] = []
    def __len__( self ):
        """simple docstring"""
        return len(self._stack1 ) + len(self._stack2 )
    def __repr__( self ):
        """simple docstring"""
        return F"""Queue({tuple(self._stack2[::-1] + self._stack1 )})"""
    def put( self , item: _T ):
        """simple docstring"""
        self._stack1.append(item )
    def get( self ):
        """simple docstring"""
        # Refill the output stack only when it is empty, so each element is
        # moved between the two stacks at most once.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop() )
        if not self._stack2:
            raise IndexError('''Queue is empty''' )
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
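# Usage sketch: each element crosses from _stack1 to _stack2 at most once, so a
# sequence of n put/get operations costs O(n) in total (amortized O(1) each).
#
#   q = QueueByTwoStacks([1, 2, 3])
#   q.put(4)
#   assert q.get() == 1
#   assert q.get() == 2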
| 366
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCAmelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
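# Hedged micro-sketch of what _LazyModule does with the mapping above (not the
# actual transformers implementation): a submodule is only imported when one of
# its attributes is first accessed.
#
#   class LazyModule(types.ModuleType):
#       def __getattr__(self, name):
#           submodule = self._class_to_module[name]            # e.g. "config"
#           module = importlib.import_module("." + submodule, self.__name__)
#           return getattr(module, name)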
| 301
| 0
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class FlaxPipelineDownloadTests( unittest.TestCase ):
'''simple docstring'''
def __magic_name__ ( self : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_A: List[str] = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__lowercase , cache_dir=__lowercase )
_A: int = [t[-1] for t in os.walk(os.path.join(__lowercase , os.listdir(__lowercase )[0] , '''snapshots''' ) )]
_A: Any = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class FlaxStableDiffusionPipelineSlowTests( unittest.TestCase ):
'''simple docstring'''
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: int = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__lowercase )
_A: Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
_A: Any = jax.random.PRNGKey(0 )
_A: Dict = 4
_A: int = jax.device_count()
_A: Any = num_samples * [prompt]
_A: List[str] = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
_A: Union[str, Any] = replicate(__lowercase )
_A: str = jax.random.split(__lowercase , __lowercase )
_A: int = shard(__lowercase )
_A: str = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 4.1514745 ) < 1e-3
            assert np.abs(np.abs(__lowercase , dtype=np.float32 ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
_A: Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__lowercase ) == num_samples
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=__lowercase )
_A: Optional[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
_A: Any = jax.random.PRNGKey(0 )
_A: Union[str, Any] = 5_0
_A: Union[str, Any] = jax.device_count()
_A: int = num_samples * [prompt]
_A: Dict = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
_A: Any = replicate(__lowercase )
_A: Optional[Any] = jax.random.split(__lowercase , __lowercase )
_A: str = shard(__lowercase )
_A: Optional[int] = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.05652401) ) < 1e-3
            assert np.abs((np.abs(__lowercase , dtype=np.float32 ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Any = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloat16 , safety_checker=__lowercase )
_A: Optional[int] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
_A: Union[str, Any] = jax.random.PRNGKey(0 )
_A: Optional[int] = 5_0
_A: Optional[int] = jax.device_count()
_A: int = num_samples * [prompt]
_A: str = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
_A: Dict = replicate(__lowercase )
_A: Optional[int] = jax.random.split(__lowercase , __lowercase )
_A: List[str] = shard(__lowercase )
_A: int = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.04003906) ) < 1e-3
            assert np.abs((np.abs(__lowercase , dtype=np.float32 ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
_A: List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloat16 )
_A: Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
_A: Any = jax.random.PRNGKey(0 )
_A: Dict = 5_0
_A: Union[str, Any] = jax.device_count()
_A: str = num_samples * [prompt]
_A: Tuple = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
_A: Optional[int] = replicate(__lowercase )
_A: int = jax.random.split(__lowercase , __lowercase )
_A: List[str] = shard(__lowercase )
_A: int = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.04003906) ) < 1e-3
            assert np.abs((np.abs(__lowercase , dtype=np.float32 ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A: List[str] = FlaxDDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=__lowercase , steps_offset=1 , )
_A: List[str] = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloat16 , scheduler=__lowercase , safety_checker=__lowercase , )
_A: Dict = scheduler.create_state()
_A: Optional[Any] = scheduler_state
_A: List[str] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
_A: int = jax.random.PRNGKey(0 )
_A: List[Any] = 5_0
_A: Optional[int] = jax.device_count()
_A: int = num_samples * [prompt]
_A: Dict = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
_A: Dict = replicate(__lowercase )
_A: Optional[int] = jax.random.split(__lowercase , __lowercase )
_A: List[str] = shard(__lowercase )
_A: Optional[Any] = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.045043945) ) < 1e-3
            assert np.abs((np.abs(__lowercase , dtype=np.float32 ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
    def test_jax_memory_efficient_attention( self ):
        """simple docstring"""
        prompt = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0 ) , num_samples )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloat16 , safety_checker=None , )
        params = replicate(params )
        prompt_ids = pipeline.prepare_inputs(prompt )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids , params , prng_seed , jit=True ).images
        assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        slice = images[2, 0, 2_5_6, 1_0:1_7, 1]
        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloat16 , safety_checker=None , use_memory_efficient_attention=True , )
        params = replicate(params )
        prompt_ids = pipeline.prepare_inputs(prompt )
        prompt_ids = shard(prompt_ids )
        images_eff = pipeline(prompt_ids , params , prng_seed , jit=True ).images
        assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        slice_eff = images_eff[2, 0, 2_5_6, 1_0:1_7, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice ).max() < 1e-2
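# The replicate/shard pattern used throughout these tests is the standard pmap
# data layout. Hedged recap (assumes a host with jax.device_count() devices):
#
#   params = replicate(params)          # copy the pytree of weights to every device
#   prompt_ids = shard(prompt_ids)      # split the batch along a new device axis
#   rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
#   images = pipeline(prompt_ids, params, rng, jit=True).images   # pmapped call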
| 367
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest( SchedulerCommonTest ):
    '''simple docstring'''

    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
            '''num_train_timesteps''': 1_0_0_0,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        """simple docstring"""
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self ):
        """simple docstring"""
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        """simple docstring"""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_variance_type( self ):
        """simple docstring"""
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self ):
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_thresholding( self ):
        """simple docstring"""
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_prediction_type( self ):
        """simple docstring"""
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ):
        """simple docstring"""
        for t in [0, 5_0_0, 9_9_9]:
            self.check_over_forward(time_step=t )
    def test_variance( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Any = self.scheduler_classes[0]
_A: List[str] = self.get_scheduler_config()
_A: Union[str, Any] = scheduler_class(**lowerCAmelCase_ )
_A: List[Any] = len(lowerCAmelCase_ )
_A: Union[str, Any] = self.dummy_model()
_A: Dict = self.dummy_sample_deter
_A: Dict = self.dummy_sample_deter + 0.1
_A: str = self.dummy_sample_deter - 0.1
_A: str = samplea.shape[0]
_A: Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
_A: List[str] = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ )
_A: List[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_A: Optional[int] = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_A: Dict = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A: List[str] = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 1153.1833 ) < 1e-2
assert abs(result_mean.item() - 0.5005 ) < 1e-3
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Optional[Any] = self.scheduler_classes[0]
_A: List[Any] = self.get_scheduler_config()
_A: Any = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = len(lowerCAmelCase_ )
_A: Any = self.dummy_model()
_A: Optional[int] = self.dummy_sample_deter
_A: List[str] = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_A: Optional[int] = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_A: Optional[int] = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_A: List[Any] = pred_prev_sample
_A: Optional[int] = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A: Any = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Optional[int] = self.scheduler_classes[0]
_A: Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
_A: List[str] = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = len(lowerCAmelCase_ )
_A: Any = self.dummy_model()
_A: Any = self.dummy_sample_deter
_A: str = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_A: Any = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_A: int = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_A: Tuple = pred_prev_sample
_A: List[Any] = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A: str = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Optional[int] = self.scheduler_classes[0]
_A: Optional[Any] = self.get_scheduler_config()
_A: Dict = scheduler_class(**lowerCAmelCase_ )
_A: Any = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
_A: Tuple = scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase_ ):
if i == len(lowerCAmelCase_ ) - 1:
_A: Dict = -1
else:
_A: int = timesteps[i + 1]
_A: List[str] = scheduler.previous_timestep(lowerCAmelCase_ )
_A: str = prev_t.item()
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Tuple = self.scheduler_classes[0]
_A: int = self.get_scheduler_config()
_A: Any = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(lowerCAmelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: List[str] = self.scheduler_classes[0]
_A: Optional[Any] = self.get_scheduler_config()
_A: Union[str, Any] = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = [1_0_0, 8_7, 5_0, 1, 0]
_A: Dict = len(lowerCAmelCase_ )
with self.assertRaises(lowerCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: List[Any] = self.scheduler_classes[0]
_A: int = self.get_scheduler_config()
_A: str = scheduler_class(**lowerCAmelCase_ )
_A: Any = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowerCAmelCase_ , msg=F"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
def euclidean_gcd(a: int, b: int) -> int:
    # Iterative Euclidean algorithm: gcd(a, b) == gcd(b, a % b).
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main() -> None:
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
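# Worked trace of the iterative version, euclidean_gcd(252, 105):
#   (a, b): (252, 105) -> (105, 42) -> (42, 21) -> (21, 0), so the gcd is 21.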
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Any = GPTSanJapaneseTokenizer
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : str = {'''do_clean_text''': False, '''add_prefix_space''': False}
def __magic_name__ ( self : Any ):
"""simple docstring"""
super().setUp()
# fmt: off
_A: Union[str, Any] = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
_A: Union[str, Any] = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
_A: str = {'''unk_token''': '''<unk>'''}
_A: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_A: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.emoji_file , '''w''' ) as emoji_writer:
emoji_writer.write(json.dumps(lowerCAmelCase_ ) )
def __magic_name__ ( self : Optional[int] , **lowerCAmelCase_ : List[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
_A: Optional[Any] = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
_A: str = '''こんにちは、世界。 \nこんばんは、世界。😀'''
return input_text, output_text
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
_A , _A: Optional[int] = self.get_input_output_texts(lowerCAmelCase_ )
_A: Union[str, Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_A: Tuple = tokenizer.decode(lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
return text, ids
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
pass # TODO add if relevant
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
pass # TODO add if relevant
def __magic_name__ ( self : Dict ):
"""simple docstring"""
pass # TODO add if relevant
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
_A: List[str] = self.get_tokenizer()
# Testing tokenization
_A: List[Any] = '''こんにちは、世界。 こんばんは、㔺界。'''
_A: Dict = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
_A: List[Any] = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing conversion to ids without special tokens
_A: Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_A: Optional[int] = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing conversion to ids with special tokens
_A: Dict = tokens + [tokenizer.unk_token]
_A: str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
_A: Optional[int] = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Dict = self.get_tokenizer()
# Testing tokenization
_A: Optional[int] = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
_A: str = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
_A: Tuple = tokenizer.encode(lowerCAmelCase_ )
_A: List[str] = tokenizer.decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: List[Any] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
_A: Union[str, Any] = '''こんにちは、世界。'''
_A: Optional[int] = '''こんばんは、㔺界。😀'''
_A: str = '''こんにちは、世界。こんばんは、世界。😀'''
_A: List[Any] = tokenizer.encode(prefix_text + input_text )
_A: Optional[Any] = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
_A: List[Any] = tokenizer.encode(lowerCAmelCase_ , prefix_text=lowerCAmelCase_ )
_A: Union[str, Any] = tokenizer.decode(lowerCAmelCase_ )
_A: Any = tokenizer.decode(lowerCAmelCase_ )
_A: Dict = tokenizer.decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: str = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
_A: Optional[int] = '''こんにちは、世界。'''
_A: Optional[int] = '''こんばんは、㔺界。😀'''
_A: Any = len(tokenizer.encode(lowerCAmelCase_ ) ) - 2
_A: int = len(tokenizer.encode(lowerCAmelCase_ ) ) - 2
_A: Optional[Any] = [1] + [0] * (len_prefix + len_text + 1)
_A: Any = [1] * (len_prefix + len_text + 1) + [0]
_A: Optional[int] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
_A: Optional[Any] = tokenizer(prefix_text + input_text ).token_type_ids
_A: List[str] = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
_A: Dict = tokenizer(lowerCAmelCase_ , prefix_text=lowerCAmelCase_ ).token_type_ids
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: str = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
        x_token_1 = tokenizer.encode('''あンいワ''' )
        x_token_2 = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
        x_token_3 = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_2 ) )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_3 ) )
        self.assertNotEqual(x_token_1 , x_token_2 )
        self.assertNotEqual(x_token_1 , x_token_3 )
        self.assertEqual(x_token_1[1] , x_token_2[-1] )  # SEG token
        self.assertEqual(x_token_1[1] , x_token_3[3] )  # SEG token
@slow
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A: Tuple = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
        input_pairs = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
        x_token = tokenizer(input_pairs , padding=True )
        x_token_a = tokenizer.batch_encode_plus(input_pairs , padding=True )
        # fmt: off
        input_ids_expected = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
        token_type_ids_expected = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        attention_mask_expected = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , input_ids_expected )
        self.assertListEqual(x_token.token_type_ids , token_type_ids_expected )
        self.assertListEqual(x_token.attention_mask , attention_mask_expected )
        self.assertListEqual(x_token_a.input_ids , input_ids_expected )
        self.assertListEqual(x_token_a.token_type_ids , token_type_ids_expected )
        self.assertListEqual(x_token_a.attention_mask , attention_mask_expected )
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
# tokenizer has no padding token
pass
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
UpperCAmelCase__ : Union[str, Any] = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase_ : int = 1_4 ):
"""simple docstring"""
if group not in primes:
raise ValueError('''Unsupported Group''' )
_A: str = primes[group]['prime']
_A: Optional[int] = primes[group]['generator']
_A: Tuple = int(hexlify(urandom(3_2 ) ) , base=1_6 )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
return hex(self.__private_key )[2:]
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
_A: int = pow(self.generator , self.__private_key , self.prime )
return hex(_UpperCAmelCase )[2:]
def __magic_name__ ( self : int , lowerCAmelCase_ : int ):
"""simple docstring"""
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(_UpperCAmelCase , (self.prime - 1) // 2 , self.prime ) == 1
)
def __magic_name__ ( self : Tuple , lowerCAmelCase_ : str ):
"""simple docstring"""
_A: List[Any] = int(_UpperCAmelCase , base=1_6 )
if not self.is_valid_public_key(_UpperCAmelCase ):
raise ValueError('''Invalid public key''' )
_A: Any = pow(_UpperCAmelCase , self.__private_key , self.prime )
        return sha256(str(_UpperCAmelCase ).encode() ).hexdigest()
@staticmethod
def __magic_name__ ( lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
"""simple docstring"""
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(_UpperCAmelCase , (prime - 1) // 2 , _UpperCAmelCase ) == 1
)
@staticmethod
def __magic_name__ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int = 1_4 ):
"""simple docstring"""
_A: str = int(_UpperCAmelCase , base=1_6 )
_A: int = int(_UpperCAmelCase , base=1_6 )
_A: Any = primes[group]['prime']
if not DiffieHellman.is_valid_public_key_static(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError('''Invalid public key''' )
_A: List[str] = pow(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        return sha256(str(_UpperCAmelCase ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
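# Self-contained sketch of the exchange the class above implements, using the
# group-14 (2048-bit) MODP parameters from `primes`; it relies only on modular
# exponentiation, so it sidesteps the obfuscated method names above.
if __name__ == "__main__":
    _p = primes[14]["prime"]
    _g = primes[14]["generator"]
    _alice_private = int(hexlify(urandom(32)), base=16)
    _bob_private = int(hexlify(urandom(32)), base=16)
    _alice_public = pow(_g, _alice_private, _p)
    _bob_public = pow(_g, _bob_private, _p)
    # Both parties derive the same shared secret g^(ab) mod p.
    assert pow(_bob_public, _alice_private, _p) == pow(_alice_public, _bob_private, _p)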
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
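# The loop above advances a Pell-like recurrence; the candidate perimeters
# 2 * value +/- 2 correspond to almost-equilateral triangles (a, a, a +/- 1)
# with integer sides and integral area, as in Project Euler problem 94
# (assumed here from the structure of the recurrence).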
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
UpperCAmelCase__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
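# With _import_structure wired up, `from transformers.models.nllb import NllbTokenizer`
# resolves lazily: _LazyModule only imports the underlying tokenizer module on
# first attribute access.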
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roc_bert'] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
UpperCAmelCase__ : Dict = get_logger(__name__)
UpperCAmelCase__ : str = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class UpperCAmelCase :
'''simple docstring'''
@add_start_docstrings(__snake_case )
def __call__( self : Dict , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class UpperCAmelCase :
'''simple docstring'''
@add_start_docstrings(__snake_case )
def __call__( self : List[str] , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class UpperCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
@add_start_docstrings(__snake_case )
def __call__( self : Dict , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int , **lowerCAmelCase_ : Any ):
"""simple docstring"""
for processor in self:
_A: Optional[Any] = inspect.signature(processor.__call__ ).parameters
if len(__snake_case ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F"""Make sure that all the required parameters: {list(function_args.keys() )} for """
F"""{processor.__class__} are passed to the logits processor.""" )
_A: Optional[Any] = processor(__snake_case , __snake_case , __snake_case , **__snake_case )
else:
_A: List[str] = processor(__snake_case , __snake_case , __snake_case )
return scores
class UpperCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : float ):
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ) or not (temperature > 0):
raise ValueError(F"""`temperature` has to be a strictly positive float, but is {temperature}""" )
_A: Optional[Any] = temperature
def __call__( self : Optional[int] , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: str = scores / self.temperature
return scores
class UpperCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase_ : float , lowerCAmelCase_ : float = -float('''Inf''' ) , lowerCAmelCase_ : int = 1 ):
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
if not isinstance(__snake_case , __snake_case ) or (min_tokens_to_keep < 1):
raise ValueError(F"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
_A: Dict = top_p
_A: int = filter_value
_A: Tuple = min_tokens_to_keep
def __call__( self : Optional[int] , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int ):
"""simple docstring"""
_A , _A: Union[str, Any] = lax.top_k(__snake_case , scores.shape[-1] )
_A: Optional[int] = jnp.full_like(__snake_case , self.filter_value )
_A: str = jax.nn.softmax(__snake_case , axis=-1 ).cumsum(axis=-1 )
_A: Dict = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
_A: str = jnp.roll(__snake_case , 1 )
score_mask |= score_mask.at[:, 0].set(__snake_case )
# min tokens to keep
_A: Optional[Any] = score_mask.at[:, : self.min_tokens_to_keep].set(__snake_case )
_A: Dict = jnp.where(__snake_case , __snake_case , __snake_case )
_A: List[Any] = jax.lax.sort_key_val(__snake_case , __snake_case )[-1]
return next_scores
class UpperCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : float = -float('''Inf''' ) , lowerCAmelCase_ : int = 1 ):
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ) or top_k <= 0:
raise ValueError(F"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
_A: Optional[int] = max(__snake_case , __snake_case )
_A: Tuple = filter_value
def __call__( self : int , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int ):
"""simple docstring"""
_A , _A: Optional[Any] = scores.shape
_A: Dict = jnp.full(batch_size * vocab_size , self.filter_value )
_A: List[Any] = min(self.top_k , scores.shape[-1] ) # Safety check
_A , _A: List[Any] = lax.top_k(__snake_case , __snake_case )
_A: Tuple = jnp.broadcast_to((jnp.arange(__snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
_A: Tuple = topk_scores.flatten()
_A: Union[str, Any] = topk_indices.flatten() + shift
_A: Optional[int] = next_scores_flat.at[topk_indices_flat].set(__snake_case )
_A: Union[str, Any] = next_scores_flat.reshape(__snake_case , __snake_case )
return next_scores
class UpperCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : int , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: List[Any] = bos_token_id
def __call__( self : List[Any] , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: Any = jnp.full(scores.shape , -float('''inf''' ) )
_A: Dict = 1 - jnp.bool_(cur_len - 1 )
_A: int = jnp.where(__snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , __snake_case )
return scores
class UpperCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: Optional[Any] = max_length
_A: List[Any] = eos_token_id
def __call__( self : int , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: List[str] = jnp.full(scores.shape , -float('''inf''' ) )
_A: Any = 1 - jnp.bool_(cur_len - self.max_length + 1 )
_A: Union[str, Any] = jnp.where(__snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , __snake_case )
return scores
class UpperCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ) or min_length < 0:
raise ValueError(F"""`min_length` has to be a positive integer, but is {min_length}""" )
if not isinstance(__snake_case , __snake_case ) or eos_token_id < 0:
raise ValueError(F"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
_A: int = min_length
_A: Dict = eos_token_id
def __call__( self : Any , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int ):
"""simple docstring"""
# create boolean flag to decide if min length penalty should be applied
_A: Optional[int] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
_A: Any = jnp.where(__snake_case , scores.at[:, self.eos_token_id].set(-float('''inf''' ) ) , __snake_case )
return scores
class UpperCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
_A: Any = list(__snake_case )
_A: List[str] = begin_index
def __call__( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: Tuple = 1 - jnp.bool_(cur_len - self.begin_index )
_A: str = jnp.where(__snake_case , scores.at[:, self.begin_suppress_tokens].set(-float('''inf''' ) ) , __snake_case )
return scores
class UpperCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : List[str] , lowerCAmelCase_ : list ):
"""simple docstring"""
_A: List[str] = list(__snake_case )
def __call__( self : Dict , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: Union[str, Any] = scores.at[..., self.suppress_tokens].set(-float('''inf''' ) )
return scores
class UpperCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
_A: Optional[Any] = dict(__snake_case )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
        _A: Tuple = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
for index, token in force_token_map.items():
if token is not None:
_A: int = force_token_array.at[index].set(__snake_case )
        _A: Union[str, Any] = jnp.int32(__snake_case )
def __call__( self : Optional[int] , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int ):
"""simple docstring"""
def _force_token(lowerCAmelCase_ : Optional[Any] ):
_A: Tuple = scores.shape[0]
_A: Optional[int] = self.force_token_array[generation_idx]
_A: List[Any] = jnp.ones_like(__snake_case , dtype=scores.dtype ) * -float('''inf''' )
_A: Optional[int] = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
_A: Optional[int] = lax.dynamic_update_slice(__snake_case , __snake_case , (0, current_token) )
return new_scores
_A: Union[str, Any] = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(__snake_case ) , lambda: scores , ) , )
return scores
class UpperCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] ):
"""simple docstring"""
_A: Union[str, Any] = generate_config.eos_token_id
_A: List[str] = generate_config.no_timestamps_token_id
_A: List[str] = generate_config.no_timestamps_token_id + 1
_A: Optional[int] = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(__snake_case , '''max_initial_timestamp_index''' ):
_A: str = generate_config.max_initial_timestamp_index
else:
_A: Optional[int] = model_config.vocab_size
if self.max_initial_timestamp_index is None:
_A: List[Any] = model_config.vocab_size
def __call__( self : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
# suppress <|notimestamps|> which is handled by without_timestamps
_A: Union[str, Any] = scores.at[:, self.no_timestamps_token_id].set(-float('''inf''' ) )
def handle_pairs(lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] ):
_A: Optional[Any] = jnp.where((cur_len - self.begin_index) >= 1 , __snake_case , __snake_case )
_A: Union[str, Any] = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __snake_case , )
_A: Optional[Any] = jnp.where((cur_len - self.begin_index) < 2 , __snake_case , __snake_case )
_A: List[Any] = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , __snake_case , __snake_case , )
return jnp.where(
__snake_case , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('''inf''' ) ) , scores_k.at[: self.eos_token_id].set(-float('''inf''' ) ) , ) , __snake_case , )
_A: List[str] = jax.vmap(__snake_case )(__snake_case , __snake_case )
_A: List[Any] = jnp.where(cur_len == self.begin_index , __snake_case , __snake_case )
_A: List[str] = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __snake_case , )
_A: Union[str, Any] = self.timestamp_begin + self.max_initial_timestamp_index
_A: List[str] = jnp.where(
__snake_case , scores.at[:, last_allowed + 1 :].set(-float('''inf''' ) ) , __snake_case , )
# if sum of probability over timestamps is above any other token, sample timestamp
_A: Tuple = jax.nn.log_softmax(__snake_case , axis=-1 )
def handle_cumulative_probs(lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] ):
_A: str = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
_A: str = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('''inf''' ) ) , __snake_case , )
_A: List[str] = jax.vmap(__snake_case )(__snake_case , __snake_case )
return scores
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(F"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
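# Example: betas_for_alpha_bar(10) returns a length-10 float32 tensor where each
# beta_i = 1 - alpha_bar((i + 1) / n) / alpha_bar(i / n), capped at max_beta.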
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
__UpperCamelCase : Tuple = 2
@register_to_config
def __init__( self : str , lowerCAmelCase_ : int = 1_0_0_0 , lowerCAmelCase_ : float = 0.00085 , lowerCAmelCase_ : float = 0.012 , lowerCAmelCase_ : str = "linear" , lowerCAmelCase_ : Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase_ : str = "epsilon" , lowerCAmelCase_ : Optional[bool] = False , lowerCAmelCase_ : Optional[bool] = False , lowerCAmelCase_ : float = 1.0 , lowerCAmelCase_ : str = "linspace" , lowerCAmelCase_ : int = 0 , ):
"""simple docstring"""
if trained_betas is not None:
            _A: Optional[Any] = torch.tensor(lowerCAmelCase_ , dtype=torch.float32 )
elif beta_schedule == "linear":
            _A: List[str] = torch.linspace(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_A: Optional[Any] = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase_ , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_A: Tuple = betas_for_alpha_bar(lowerCAmelCase_ , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
_A: int = betas_for_alpha_bar(lowerCAmelCase_ , alpha_transform_type='''exp''' )
else:
            raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""" )
_A: Union[str, Any] = 1.0 - self.betas
_A: Dict = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A: str = use_karras_sigmas
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int]=None ):
"""simple docstring"""
if schedule_timesteps is None:
_A: List[str] = self.timesteps
_A: int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_A: Optional[int] = 1 if len(lowerCAmelCase_ ) > 1 else 0
else:
_A: int = timestep.cpu().item() if torch.is_tensor(lowerCAmelCase_ ) else timestep
_A: List[str] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __magic_name__ ( self : int ):
"""simple docstring"""
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : Union[float, torch.FloatTensor] , ):
"""simple docstring"""
_A: List[str] = self.index_for_timestep(lowerCAmelCase_ )
_A: str = self.sigmas[step_index]
_A: str = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, torch.device] = None , lowerCAmelCase_ : Optional[int] = None , ):
"""simple docstring"""
_A: Union[str, Any] = num_inference_steps
_A: str = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_A: Optional[Any] = np.linspace(0 , num_train_timesteps - 1 , lowerCAmelCase_ , dtype=lowerCAmelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_A: List[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_A: Dict = (np.arange(0 , lowerCAmelCase_ ) * step_ratio).round()[::-1].copy().astype(lowerCAmelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_A: Union[str, Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_A: List[Any] = (np.arange(lowerCAmelCase_ , 0 , -step_ratio )).round().copy().astype(lowerCAmelCase_ )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_A: Optional[int] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_A: str = np.log(lowerCAmelCase_ )
_A: int = np.interp(lowerCAmelCase_ , np.arange(0 , len(lowerCAmelCase_ ) ) , lowerCAmelCase_ )
if self.config.use_karras_sigmas:
_A: Optional[int] = self._convert_to_karras(in_sigmas=lowerCAmelCase_ , num_inference_steps=self.num_inference_steps )
_A: List[str] = np.array([self._sigma_to_t(lowerCAmelCase_ , lowerCAmelCase_ ) for sigma in sigmas] )
        _A: Optional[int] = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
_A: Optional[Any] = torch.from_numpy(lowerCAmelCase_ ).to(device=lowerCAmelCase_ )
_A: Tuple = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_A: str = torch.from_numpy(lowerCAmelCase_ )
_A: str = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(lowerCAmelCase_ ).startswith('''mps''' ):
# mps does not support float64
            _A: List[Any] = timesteps.to(lowerCAmelCase_ , dtype=torch.float32 )
else:
_A: Optional[int] = timesteps.to(device=lowerCAmelCase_ )
# empty dt and derivative
_A: Dict = None
_A: List[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_A: Dict = defaultdict(lowerCAmelCase_ )
def __magic_name__ ( self : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict ):
"""simple docstring"""
# get log sigma
_A: Tuple = np.log(lowerCAmelCase_ )
# get distribution
_A: List[str] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_A: Dict = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_A: int = low_idx + 1
_A: Optional[int] = log_sigmas[low_idx]
_A: Dict = log_sigmas[high_idx]
# interpolate sigmas
_A: Union[str, Any] = (low - log_sigma) / (low - high)
_A: Optional[Any] = np.clip(lowerCAmelCase_ , 0 , 1 )
# transform interpolation to time range
_A: Any = (1 - w) * low_idx + w * high_idx
_A: List[Any] = t.reshape(sigma.shape )
return t
def __magic_name__ ( self : Any , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
_A: float = in_sigmas[-1].item()
_A: float = in_sigmas[0].item()
_A: Union[str, Any] = 7.0 # 7.0 is the value used in the paper
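        # Karras et al. (2022): sigma_i = (sigma_max^(1/rho) + ramp_i *
        # (sigma_min^(1/rho) - sigma_max^(1/rho)))^rho, sweeping sigma_max -> sigma_min.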
_A: Optional[Any] = np.linspace(0 , 1 , lowerCAmelCase_ )
_A: Tuple = sigma_min ** (1 / rho)
_A: Optional[Any] = sigma_max ** (1 / rho)
_A: List[str] = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
return self.dt is None
def __magic_name__ ( self : Tuple , lowerCAmelCase_ : Union[torch.FloatTensor, np.ndarray] , lowerCAmelCase_ : Union[float, torch.FloatTensor] , lowerCAmelCase_ : Union[torch.FloatTensor, np.ndarray] , lowerCAmelCase_ : bool = True , ):
"""simple docstring"""
_A: Optional[int] = self.index_for_timestep(lowerCAmelCase_ )
# advance index counter by 1
_A: Union[str, Any] = timestep.cpu().item() if torch.is_tensor(lowerCAmelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_A: Optional[int] = self.sigmas[step_index]
_A: Union[str, Any] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_A: Union[str, Any] = self.sigmas[step_index - 1]
_A: Optional[int] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_A: List[Any] = 0
_A: Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_A: Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_next
_A: List[str] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_A: int = sigma_hat if self.state_in_first_order else sigma_next
_A: List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_A: Optional[int] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
_A: Tuple = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_A: Optional[int] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_A: List[Any] = sigma_next - sigma_hat
# store for 2nd order step
_A: str = derivative
_A: Any = dt
_A: Dict = sample
else:
# 2. 2nd order / Heun's method
_A: List[str] = (sample - pred_original_sample) / sigma_next
_A: str = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_A: Dict = self.dt
_A: int = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_A: int = None
_A: int = None
_A: Optional[Any] = None
_A: Optional[Any] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase_ )
def __magic_name__ ( self : Any , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : torch.FloatTensor , ):
"""simple docstring"""
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_A: str = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCAmelCase_ ):
# mps does not support float64
            _A: Optional[int] = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            _A: Any = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
_A: Union[str, Any] = self.timesteps.to(original_samples.device )
_A: int = timesteps.to(original_samples.device )
_A: str = [self.index_for_timestep(lowerCAmelCase_ , lowerCAmelCase_ ) for t in timesteps]
_A: Optional[Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_A: List[str] = sigma.unsqueeze(-1 )
_A: Any = original_samples + noise * sigma
return noisy_samples
def __len__( self : Dict ):
"""simple docstring"""
return self.config.num_train_timesteps
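# A minimal usage sketch; `HeunDiscreteScheduler` is an assumed name for the
# (obfuscated) class above, whose methods mirror diffusers' Heun scheduler:
#   scheduler = HeunDiscreteScheduler(num_train_timesteps=1_0_0_0)
#   scheduler.set_timesteps(5_0)
#   # step() alternates a first-order Euler step with a second-order Heun correction.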
"""simple docstring"""
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    # 1_28 + level + (c - 1_28) reduces to c + level: shift every channel by `level`.
    def brightness(c: int) -> float:
        return 1_28 + level + (c - 1_28)
    if not -255.0 <= level <= 255.0:
        raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save('image_data/lena_brightness.png', format='png')
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : str = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
__UpperCamelCase : ClassVar[Features] = Features({'''audio''': Audio()} )
__UpperCamelCase : ClassVar[Features] = Features({'''transcription''': Value('''string''' )} )
__UpperCamelCase : str = "audio"
__UpperCamelCase : str = "transcription"
def __magic_name__ ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
if self.audio_column not in features:
raise ValueError(F"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , lowerCAmelCase_ ):
raise ValueError(F"""Column {self.audio_column} is not an Audio type.""" )
_A: Optional[int] = copy.deepcopy(self )
_A: str = self.input_schema.copy()
_A: List[str] = features[self.audio_column]
_A: Dict = input_schema
return task_template
@property
def __magic_name__ ( self : str ):
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
UpperCAmelCase__ : str = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Tuple = '''umt5'''
__UpperCamelCase : Tuple = ['''past_key_values''']
def __init__( self : str , lowerCAmelCase_ : Any=2_5_0_1_1_2 , lowerCAmelCase_ : int=5_1_2 , lowerCAmelCase_ : List[Any]=6_4 , lowerCAmelCase_ : List[Any]=1_0_2_4 , lowerCAmelCase_ : int=8 , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Dict=6 , lowerCAmelCase_ : int=3_2 , lowerCAmelCase_ : Any=1_2_8 , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : List[str]=1e-6 , lowerCAmelCase_ : int=1.0 , lowerCAmelCase_ : Dict="gated-gelu" , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[Any]="T5Tokenizer" , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Optional[Any]=0 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : Any=0 , **lowerCAmelCase_ : Dict , ):
"""simple docstring"""
super().__init__(
is_encoder_decoder=lowerCAmelCase_ , tokenizer_class=lowerCAmelCase_ , tie_word_embeddings=lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
_A: List[Any] = vocab_size
_A: Dict = d_model
_A: int = d_kv
_A: List[str] = d_ff
_A: Any = num_layers
_A: List[str] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_A: int = num_heads
_A: str = relative_attention_num_buckets
_A: Optional[Any] = relative_attention_max_distance
_A: Tuple = dropout_rate
_A: Union[str, Any] = layer_norm_epsilon
_A: Optional[int] = initializer_factor
_A: List[Any] = feed_forward_proj
_A: int = use_cache
_A: Tuple = self.feed_forward_proj.split('''-''' )
_A: List[Any] = act_info[-1]
_A: int = act_info[0] == '''gated'''
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
if feed_forward_proj == "gated-gelu":
_A: Tuple = '''gelu_new'''
@property
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
return self.d_model
@property
def __magic_name__ ( self : Dict ):
"""simple docstring"""
return self.num_heads
@property
def __magic_name__ ( self : str ):
"""simple docstring"""
return self.num_layers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A: str = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
_A: List[str] = '''past_encoder_sequence + sequence'''
_A: Optional[Any] = {0: '''batch'''}
_A: List[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
_A: Any = {0: '''batch''', 1: '''decoder_sequence'''}
_A: List[str] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction='''inputs''' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __magic_name__ ( self : int ):
"""simple docstring"""
return 1_3
@property
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
return 5e-4
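if __name__ == "__main__":
    # Minimal usage sketch. Assumption: the config class above mirrors the upstream
    # `transformers.UMT5Config`, which is used here under its public name.
    from transformers import UMT5Config

    config = UMT5Config(d_model=2_5_6, num_layers=4, feed_forward_proj='''gated-gelu''')
    # "gated-gelu" is normalized to the "gelu_new" activation in __init__ above
    print(config.hidden_size, config.num_attention_heads, config.dense_act_fn)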
| 351
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
UpperCAmelCase__ : Optional[int] = 'bart'
UpperCAmelCase__ : Dict = True
@st.cache(allow_output_mutation=a )
def lowerCamelCase__ ( ) -> Dict:
if LOAD_DENSE_INDEX:
_A: Optional[Any] = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
_A: Any = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
_A: Any = qar_model.eval()
else:
_A , _A: Union[str, Any] = (None, None)
if MODEL_TYPE == "bart":
_A: Union[str, Any] = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
_A: Dict = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
_A: Union[str, Any] = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
_A: int = sas_model.eval()
else:
_A , _A: Tuple = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=a )
def lowerCamelCase__ ( ) -> Tuple:
if LOAD_DENSE_INDEX:
_A: List[Any] = faiss.StandardGpuResources()
_A: int = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
_A: Dict = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 1_28) , )
_A: str = faiss.IndexFlatIP(1_28 )
_A: Optional[int] = faiss.index_cpu_to_gpu(a , 1 , a )
wikiaab_gpu_index_flat.add(a ) # TODO fix for larger GPU
else:
_A , _A: str = (None, None)
_A: Tuple = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=a )
def lowerCamelCase__ ( ) -> str:
_A: Dict = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
_A: Dict = elia['''train_eli5''']
_A: List[Any] = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 1_28) )
_A: Any = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(a )
return (elia_train, eli5_train_q_index)
UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ : int = load_indexes()
UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ : Any = load_models()
UpperCAmelCase__ ,UpperCAmelCase__ : Tuple = load_train_data()
def lowerCamelCase__ ( a , a=10 ) -> str:
_A: Optional[int] = embed_questions_for_retrieval([question] , a , a )
_A , _A: List[str] = eli5_train_q_index.search(a , a )
_A: Dict = [elia_train[int(a )] for i in I[0]]
return nn_examples
def lowerCamelCase__ ( a , a="wiki40b" , a="dense" , a=10 ) -> str:
if source == "none":
_A , _A: Any = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_A , _A: List[Any] = query_qa_dense_index(
a , a , a , a , a , a )
else:
_A , _A: Tuple = query_es_index(
a , a , index_name='''english_wiki40b_snippets_100w''' , n_results=a , )
_A: Union[str, Any] = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_A: str = '''question: {} context: {}'''.format(a , a )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda a : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda a : None),
} )
def lowerCamelCase__ ( a , a , a , a=64 , a=2_56 , a=False , a=2 , a=0.95 , a=0.8 ) -> str:
with torch.no_grad():
_A: Optional[int] = qa_sas_generate(
a , a , a , num_answers=1 , num_beams=a , min_len=a , max_len=a , do_sample=a , temp=a , top_p=a , top_k=a , max_input_length=10_24 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
UpperCAmelCase__ : List[Any] = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
UpperCAmelCase__ : Optional[Any] = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCAmelCase__ : str = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
UpperCAmelCase__ : str = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
UpperCAmelCase__ : Optional[int] = st.sidebar.checkbox('Demo options')
if demo_options:
UpperCAmelCase__ : Any = st.sidebar.selectbox(
'',
action_list,
index=3,
)
UpperCAmelCase__ : List[str] = action_list.index(action_st)
UpperCAmelCase__ : Optional[Any] = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
UpperCAmelCase__ : List[Any] = show_type == 'Show full text of passages'
else:
UpperCAmelCase__ : Dict = 3
UpperCAmelCase__ : str = True
UpperCAmelCase__ : Optional[Any] = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
UpperCAmelCase__ : List[str] = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between question and passage embeddings\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.\n The answer is then generated by a sequence-to-sequence model which takes the question and the retrieved documents as input.\n '
st.sidebar.markdown(retriever_info)
UpperCAmelCase__ : Optional[int] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
UpperCAmelCase__ : int = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
UpperCAmelCase__ : Tuple = 'wiki40b'
UpperCAmelCase__ : List[Any] = 'dense'
UpperCAmelCase__ : Tuple = 'beam'
UpperCAmelCase__ : Any = 2
UpperCAmelCase__ : Dict = 64
UpperCAmelCase__ : Any = 256
UpperCAmelCase__ : int = None
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Optional[int] = st.sidebar.checkbox('Generation options')
if generate_options:
UpperCAmelCase__ : Any = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
UpperCAmelCase__ : Optional[int] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
UpperCAmelCase__ : int = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
UpperCAmelCase__ : str = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
UpperCAmelCase__ : Tuple = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCAmelCase__ : List[Any] = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCAmelCase__ : Union[str, Any] = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCAmelCase__ : Optional[int] = None
# start main text
UpperCAmelCase__ : Any = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
UpperCAmelCase__ : List[Any] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
UpperCAmelCase__ : Any = st.text_input('Enter your question here:', '')
else:
UpperCAmelCase__ : int = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCAmelCase__ ,UpperCAmelCase__ : Tuple = make_support(question, source=wiki_source, method='dense', n_results=10)
UpperCAmelCase__ ,UpperCAmelCase__ : Optional[Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
UpperCAmelCase__ : Dict = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCAmelCase__ : str = support_list[:10]
UpperCAmelCase__ : str = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
UpperCAmelCase__ ,UpperCAmelCase__ : List[Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCAmelCase__ ,UpperCAmelCase__ : Optional[Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
UpperCAmelCase__ : Any = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
UpperCAmelCase__ : Tuple = res[1].strip()
if sec_titles == "":
UpperCAmelCase__ : Optional[int] = '[{}]({})'.format(res[0], wiki_url)
else:
UpperCAmelCase__ : int = sec_titles.split(' & ')
UpperCAmelCase__ : Union[str, Any] = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
UpperCAmelCase__ : Union[str, Any] = find_nearest_training(question)
UpperCAmelCase__ : int = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
UpperCAmelCase__ : Tuple = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
UpperCAmelCase__ : Any = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
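# To try the app locally, launch it with Streamlit (the file name is illustrative,
# and the faiss/memmap index files loaded above must already exist on disk):
#   streamlit run eli5_app.py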
| 301
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Dict = ShapEImgaImgPipeline
__UpperCamelCase : int = ['''image''']
__UpperCamelCase : Any = ['''image''']
__UpperCamelCase : List[str] = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
__UpperCamelCase : List[str] = False
@property
def __magic_name__ ( self : str ):
"""simple docstring"""
return 3_2
@property
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
return 3_2
@property
def __magic_name__ ( self : Any ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
return 8
@property
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_A: Optional[int] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=6_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
_A: str = CLIPVisionModel(lowerCAmelCase_ )
return model
@property
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A: Any = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=lowerCAmelCase_ , do_normalize=lowerCAmelCase_ , do_resize=lowerCAmelCase_ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_2_4 , )
return image_processor
@property
def __magic_name__ ( self : int ):
"""simple docstring"""
torch.manual_seed(0 )
_A: Optional[Any] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_6,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 3_2,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
_A: Optional[Any] = PriorTransformer(**lowerCAmelCase_ )
return model
@property
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
torch.manual_seed(0 )
_A: Dict = {
'''param_shapes''': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 1_2,
'''background''': (
0.1,
0.1,
0.1,
),
}
_A: Dict = ShapERenderer(**lowerCAmelCase_ )
return model
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A: Union[str, Any] = self.dummy_prior
_A: Tuple = self.dummy_image_encoder
_A: Tuple = self.dummy_image_processor
_A: List[str] = self.dummy_renderer
_A: Union[str, Any] = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1_0_2_4 , prediction_type='''sample''' , use_karras_sigmas=lowerCAmelCase_ , clip_sample=lowerCAmelCase_ , clip_sample_range=1.0 , )
_A: List[str] = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __magic_name__ ( self : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple=0 ):
"""simple docstring"""
_A: Any = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
if str(lowerCAmelCase_ ).startswith('''mps''' ):
_A: Union[str, Any] = torch.manual_seed(lowerCAmelCase_ )
else:
_A: Optional[int] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_A: Optional[int] = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 3_2,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : str ):
"""simple docstring"""
_A: int = '''cpu'''
_A: int = self.get_dummy_components()
_A: int = self.pipeline_class(**lowerCAmelCase_ )
_A: Dict = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A: List[str] = pipe(**self.get_dummy_inputs(lowerCAmelCase_ ) )
_A: Any = output.images[0]
_A: str = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
_A: Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: str = torch_device == '''cpu'''
_A: int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCAmelCase_ , relax_max_difference=lowerCAmelCase_ , )
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
_A: Dict = self.get_dummy_components()
_A: Union[str, Any] = self.pipeline_class(**lowerCAmelCase_ )
_A: Optional[Any] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A: Any = 1
_A: Dict = 2
_A: int = self.get_dummy_inputs(lowerCAmelCase_ )
for key in inputs.keys():
if key in self.batch_params:
_A: Optional[int] = batch_size * [inputs[key]]
_A: str = pipe(**lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
_A: List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
_A: Dict = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
_A: Optional[int] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A: Dict = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_A: Optional[Any] = pipe(
lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=3.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='''np''' , ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
| 352
|
from __future__ import annotations
UpperCAmelCase__ : List[str] = list[list[int]]
# assigning initial values to the grid
UpperCAmelCase__ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
UpperCAmelCase__ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def lowerCamelCase__ ( a , a , a , a ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def lowerCamelCase__ ( a ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def lowerCamelCase__ ( a ) -> Matrix | None:
if location := find_empty_location(a ):
_A , _A: Optional[Any] = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(a , a , a , a ):
_A: str = digit
if sudoku(a ) is not None:
return grid
_A: Tuple = 0
return None
def lowerCamelCase__ ( a ) -> None:
for row in grid:
for cell in row:
print(a , end=''' ''' )
print()
if __name__ == "__main__":
# solve each example grid in turn (note that `sudoku` fills the grid in place)
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
UpperCAmelCase__ : int = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
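# Complexity note: the backtracking solver is O(9^m) in the worst case for m empty
# cells -- each empty cell tries the digits 1..9 and resets itself to 0 before
# backtracking when no digit leads to a solution.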
| 301
| 0
|
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Any = CodeGenTokenizer
__UpperCamelCase : Union[str, Any] = CodeGenTokenizerFast
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : List[Any] = {'''add_prefix_space''': True}
__UpperCamelCase : Any = False
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_A: str = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
_A: Union[str, Any] = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A: Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_A: Any = {'''unk_token''': '''<unk>'''}
_A: Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_A: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase_ ) )
def __magic_name__ ( self : str , **lowerCAmelCase_ : int ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __magic_name__ ( self : List[Any] , **lowerCAmelCase_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
_A: Optional[int] = '''lower newer'''
_A: Dict = '''lower newer'''
return input_text, output_text
def __magic_name__ ( self : str ):
"""simple docstring"""
_A: str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A: int = '''lower newer'''
_A: int = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_A: Any = tokenizer.tokenize(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_A: Optional[int] = tokens + [tokenizer.unk_token]
_A: int = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_A: List[str] = self.get_tokenizer()
_A: List[str] = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase_ )
_A: Optional[int] = '''lower newer'''
# Testing tokenization
_A: str = tokenizer.tokenize(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
_A: Tuple = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing conversion to ids without special tokens
_A: Optional[Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
_A: Tuple = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing conversion to ids with special tokens
_A: Optional[Any] = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase_ )
_A: Tuple = tokenizer.encode(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
_A: int = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing the unknown token
_A: Dict = tokens + [rust_tokenizer.unk_token]
_A: Tuple = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
def __magic_name__ ( self : List[Any] , *lowerCAmelCase_ : int , **lowerCAmelCase_ : Any ):
"""simple docstring"""
pass
def __magic_name__ ( self : int , lowerCAmelCase_ : Union[str, Any]=1_5 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_A: List[str] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
# Simple input
_A: Optional[int] = '''This is a simple input'''
_A: Union[str, Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
_A: Tuple = ('''This is a simple input''', '''This is a pair''')
_A: str = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' )
# Simple input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' )
# Simple input
self.assertRaises(
lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' , )
# Pair input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' )
# Pair input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' )
# Pair input
self.assertRaises(
lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' , )
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: Union[str, Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
_A: Union[str, Any] = '''This is a simple input'''
_A: Union[str, Any] = ['''This is a simple input looooooooong''', '''This is a simple input''']
_A: Union[str, Any] = ('''This is a simple input''', '''This is a pair''')
_A: Optional[int] = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
_A: Dict = tokenizer.pad_token_id
_A: Any = tokenizer(lowerCAmelCase_ , padding='''max_length''' , max_length=3_0 , return_tensors='''np''' )
_A: Optional[int] = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncate=lowerCAmelCase_ , return_tensors='''np''' )
_A: str = tokenizer(*lowerCAmelCase_ , padding='''max_length''' , max_length=6_0 , return_tensors='''np''' )
_A: Union[str, Any] = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncate=lowerCAmelCase_ , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 3_0 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 6_0 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
_A: Tuple = '''$$$'''
_A: Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCAmelCase_ , add_bos_token=lowerCAmelCase_ )
_A: Tuple = '''This is a simple input'''
_A: Any = ['''This is a simple input 1''', '''This is a simple input 2''']
_A: List[str] = tokenizer.bos_token_id
_A: int = tokenizer(lowerCAmelCase_ )
_A: int = tokenizer(lowerCAmelCase_ )
self.assertEqual(out_s.input_ids[0] , lowerCAmelCase_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_A: List[Any] = tokenizer.decode(out_s.input_ids )
_A: Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , lowerCAmelCase_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: Optional[Any] = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
_A: List[Any] = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
_A: Optional[Any] = '''\nif len_a > len_b: result = a\nelse: result = b'''
_A: Optional[Any] = tokenizer.encode(lowerCAmelCase_ )
_A: List[str] = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
_A: str = tokenizer.decode(lowerCAmelCase_ , truncate_before_pattern=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
pass
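# A minimal sketch of the `truncate_before_pattern` behavior exercised above, via
# the public API (downloading the checkpoint is assumed to be possible):
#   from transformers import CodeGenTokenizer
#   tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tok.encode("def add(a, b):\n    return a + b\n\n\n\n# trailing comment")
#   tok.decode(ids, truncate_before_pattern=["\n\n\n"])  # cuts the output at the blank-line run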
| 353
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
UpperCAmelCase__ : str = open # noqa: we just need to have a builtin inside this module to test it properly
| 301
| 0
|
from cva import destroyAllWindows, imread, imshow, waitKey
def lowerCamelCase__ ( a ) -> Optional[int]:
# getting the image's dimensions (number of pixel rows and columns)
_A: Any = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(a ):
for j in range(a ):
_A: int = [2_55, 2_55, 2_55] - img[i][j]
return img
if __name__ == "__main__":
# read original image
UpperCAmelCase__ : Union[str, Any] = imread('image_data/lena.jpg', 1)
# convert to its negative
UpperCAmelCase__ : str = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
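# A vectorized alternative (a sketch): `imread` returns a uint8 NumPy array, so the
# per-pixel loop in `convert_to_negative` is equivalent to one broadcast subtraction:
#   negative = 2_55 - img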
| 354
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : List[str]=3_2 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : int=1_0 , lowerCAmelCase_ : Tuple=[1_0, 2_0, 3_0, 4_0] , lowerCAmelCase_ : Optional[Any]=[1, 1, 2, 1] , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : int=3 , lowerCAmelCase_ : List[Any]=None , ):
"""simple docstring"""
_A: str = parent
_A: List[Any] = batch_size
_A: Optional[int] = image_size
_A: Dict = num_channels
_A: str = embeddings_size
_A: Any = hidden_sizes
_A: Dict = depths
_A: Any = is_training
_A: int = use_labels
_A: Tuple = hidden_act
_A: int = num_labels
_A: int = scope
_A: str = len(lowerCAmelCase_ )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A: Union[str, Any] = self.get_config()
return config, pixel_values
def __magic_name__ ( self : str ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __magic_name__ ( self : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str ):
"""simple docstring"""
_A: str = FlaxRegNetModel(config=lowerCAmelCase_ )
_A: Optional[int] = model(lowerCAmelCase_ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __magic_name__ ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
_A: Union[str, Any] = self.num_labels
_A: Union[str, Any] = FlaxRegNetForImageClassification(config=lowerCAmelCase_ )
_A: str = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A: str = self.prepare_config_and_inputs()
_A , _A: Optional[int] = config_and_inputs
_A: Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Tuple = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : int = False
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: int = FlaxRegNetModelTester(self )
_A: Union[str, Any] = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def __magic_name__ ( self : str ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self : int ):
"""simple docstring"""
return
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def __magic_name__ ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
pass
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A , _A: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A: Union[str, Any] = model_class(lowerCAmelCase_ )
_A: Any = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A: Any = [*signature.parameters.keys()]
_A: Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def __magic_name__ ( self : str ):
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple ):
_A: int = model_class(lowerCAmelCase_ )
_A: List[str] = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_A: str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A: Tuple = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 )
_A , _A: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A: Optional[Any] = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A: int = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A , _A: str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A: int = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
_A: Union[str, Any] = model_class(lowerCAmelCase_ )
@jax.jit
def model_jitted(lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : Optional[Any] ):
return model(pixel_values=lowerCAmelCase_ , **lowerCAmelCase_ )
with self.subTest('''JIT Enabled''' ):
_A: str = model_jitted(**lowerCAmelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_A: List[Any] = model_jitted(**lowerCAmelCase_ ).to_tuple()
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( ) -> Tuple:
_A: List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
_A: List[str] = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
_A: str = self.default_image_processor
_A: int = prepare_img()
_A: List[Any] = image_processor(images=lowerCAmelCase_ , return_tensors='''np''' )
_A: str = model(**lowerCAmelCase_ )
# verify the logits
_A: str = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_A: Tuple = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
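# The JIT test in the suite above follows a standard JAX pattern; a self-contained sketch:
#   import jax
#   import jax.numpy as jnp
#   f = jax.jit(lambda x: x * 2)
#   with jax.disable_jit():
#       eager = f(jnp.ones(3))              # runs op-by-op, no compilation
#   assert (f(jnp.ones(3)) == eager).all()  # compiled and eager outputs agree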
| 301
| 0
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def lowerCamelCase__ ( a , a=() , a=None , a="no" , a="29500" ) -> int:
_A: Dict = False
_A: Dict = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
_A: Any = True
elif "IPython" in sys.modules:
_A: Any = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
_A: Tuple = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , a ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cell initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
_A: Dict = 8
_A: Tuple = PrepareForLaunch(a , distributed_type='''TPU''' )
print(f"""Launching a training on {num_processes} TPU cores.""" )
xmp.spawn(a , args=a , nprocs=a , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*a )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cell initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variables to be set here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=a , master_addr='''127.0.0.1''' , master_port=a , mixed_precision=a ):
_A: Optional[Any] = PrepareForLaunch(a , distributed_type='''MULTI_GPU''' )
print(f"""Launching training on {num_processes} GPUs.""" )
try:
start_processes(a , args=a , nprocs=a , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
_A: Any = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*a )
def lowerCamelCase__ ( a , a=() , a=2 ) -> List[str]:
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variables to be set here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=a , master_addr='''127.0.0.1''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
_A: int = PrepareForLaunch(a , debug=a )
start_processes(a , args=a , nprocs=a , start_method='''fork''' )
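# Minimal usage sketch -- upstream, the first function above is published as
# `accelerate.notebook_launcher` (the training function name is illustrative):
#   from accelerate import notebook_launcher
#
#   def training_function():
#       ...  # create the Accelerator *inside* this function, per the checks above
#
#   notebook_launcher(training_function, args=(), num_processes=2)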
| 355
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __lt__( self : Dict , lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
return self[-1] < other[-1]
def __eq__( self : int , lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
return self[-1] == other[-1]
def lowerCamelCase__ ( a ) -> list:
_A: list[Stack] = []
# sort into stacks
for element in collection:
_A: Any = Stack([element] )
_A: Optional[Any] = bisect_left(a , a )
if i != len(a ):
stacks[i].append(a )
else:
stacks.append(a )
# use a heap-based merge to combine the stacks efficiently
_A: Tuple = merge(*(reversed(a ) for stack in stacks) )
return collection
if __name__ == "__main__":
UpperCAmelCase__ : Tuple = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase__ : Optional[Any] = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
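# How it works: each pile's top stays <= the elements beneath it, so every reversed
# pile is an ascending run; `bisect_left` drops each element onto the leftmost viable
# pile in O(log p), and `heapq.merge` combines the runs, giving O(n log n) overall.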
| 301
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
UpperCAmelCase__ : str = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , lowerCAmelCase_ : int=False , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Dict=6.0 , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : str="fp4" , lowerCAmelCase_ : Tuple=False , **lowerCAmelCase_ : List[str] , ):
"""simple docstring"""
_A: Dict = load_in_abit
_A: List[Any] = load_in_abit
_A: int = llm_inta_threshold
_A: Tuple = llm_inta_skip_modules
_A: Optional[Any] = llm_inta_enable_fpaa_cpu_offload
_A: Any = llm_inta_has_fpaa_weight
_A: str = bnb_abit_quant_type
_A: Tuple = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
_A: int = torch.floataa
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: Dict = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , torch.dtype ):
_A: Dict = bnb_abit_compute_dtype
else:
raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''' )
self.post_init()
def __magic_name__ ( self : Dict ):
"""simple docstring"""
if not isinstance(self.llm_inta_threshold , lowerCAmelCase_ ):
raise ValueError('''llm_int8_threshold must be a float''' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , lowerCAmelCase_ ):
raise ValueError('''llm_int8_skip_modules must be a list of strings''' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , lowerCAmelCase_ ):
raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''' )
if not isinstance(self.llm_inta_has_fpaa_weight , lowerCAmelCase_ ):
raise ValueError('''llm_int8_has_fp16_weight must be a boolean''' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''' )
if not isinstance(self.bnb_abit_quant_type , lowerCAmelCase_ ):
raise ValueError('''bnb_4bit_quant_type must be a string''' )
if not isinstance(self.bnb_abit_use_double_quant , lowerCAmelCase_ ):
raise ValueError('''bnb_4bit_use_double_quant must be a boolean''' )
if self.load_in_abit and not version.parse(importlib.metadata.version('''bitsandbytes''' ) ) >= version.parse(
'''0.39.0''' ):
raise ValueError(
'''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''' )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
return self.load_in_abit or self.load_in_abit
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def __magic_name__ ( cls : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , **lowerCAmelCase_ : int ):
"""simple docstring"""
_A: Any = cls(**lowerCAmelCase_ )
_A: List[Any] = []
for key, value in kwargs.items():
if hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
to_remove.append(lowerCAmelCase_ )
for key in to_remove:
kwargs.pop(lowerCAmelCase_ , lowerCAmelCase_ )
if return_unused_kwargs:
return config, kwargs
else:
return config
def __magic_name__ ( self : Tuple , lowerCAmelCase_ : Union[str, os.PathLike] ):
"""simple docstring"""
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
_A: Optional[int] = self.to_dict()
_A: List[str] = json.dumps(lowerCAmelCase_ , indent=2 , sort_keys=lowerCAmelCase_ ) + '''\n'''
writer.write(lowerCAmelCase_ )
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: Dict = copy.deepcopy(self.__dict__ )
_A: Any = str(output['''bnb_4bit_compute_dtype'''] ).split('''.''' )[1]
return output
def __repr__( self : str ):
"""simple docstring"""
return F"""{self.__class__.__name__} {self.to_json_string()}"""
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : bool = True ):
"""simple docstring"""
if use_diff is True:
_A: Any = self.to_diff_dict()
else:
_A: List[str] = self.to_dict()
return json.dumps(lowerCAmelCase_ , indent=2 , sort_keys=lowerCAmelCase_ ) + "\n"
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Optional[int] = self.to_dict()
# get the default config dict
_A: Dict = BitsAndBytesConfig().to_dict()
_A: Tuple = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
_A: str = value
return serializable_config_dict
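if __name__ == "__main__":
    # Minimal usage sketch. Assumption: the class above mirrors the upstream
    # `transformers.BitsAndBytesConfig`, whose public keyword names are used here.
    import torch
    from transformers import BitsAndBytesConfig

    quant_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type='''nf4''',
        bnb_4bit_compute_dtype=torch.bfloat16,
        bnb_4bit_use_double_quant=True,
    )
    print(quant_config.to_json_string())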
| 356
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
UpperCAmelCase__ : Any = getLogger(__name__)
UpperCAmelCase__ : Optional[Any] = 'cuda' if torch.cuda.is_available() else 'cpu'
def lowerCamelCase__ ( a , a , a , a = 8 , a = DEFAULT_DEVICE , a=False , a="summarization" , a=None , **a , ) -> Dict:
_A: str = Path(a ).open('''w''' , encoding='''utf-8''' )
_A: Optional[Any] = str(a )
_A: Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(a ).to(a )
if fpaa:
_A: Any = model.half()
_A: Optional[int] = AutoTokenizer.from_pretrained(a )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
_A: Any = time.time()
# update config with task specific params
use_task_specific_params(a , a )
if prefix is None:
_A: int = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(a , a ) ) ):
_A: int = [prefix + text for text in examples_chunk]
_A: str = tokenizer(a , return_tensors='''pt''' , truncation=a , padding='''longest''' ).to(a )
_A: str = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **a , )
_A: str = tokenizer.batch_decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
_A: Optional[int] = int(time.time() - start_time ) # seconds
_A: Union[str, Any] = len(a )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def lowerCamelCase__ ( ) -> Tuple:
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def lowerCamelCase__ ( a=True ) -> Optional[Any]:
_A: str = argparse.ArgumentParser()
parser.add_argument('''model_name''' , type=a , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' , type=a , help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' , type=a , help='''where to save summaries''' )
parser.add_argument('''--reference_path''' , type=a , required=a , help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' , type=a , required=a , default='''metrics.json''' , help='''where to save metrics''' )
parser.add_argument('''--device''' , type=a , required=a , default=a , help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
'''--prefix''' , type=a , required=a , default=a , help='''will be added to the beginning of src examples''' )
parser.add_argument('''--task''' , type=a , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=a , default=8 , required=a , help='''batch size''' )
parser.add_argument(
'''--n_obs''' , type=a , default=-1 , required=a , help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' , nargs='''?''' , type=a , const=datetime_now() , help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
_A , _A: Tuple = parser.parse_known_args()
_A: List[str] = parse_numeric_n_bool_cl_kwargs(a )
if parsed_args and verbose:
print(f"""parsed the following generate kwargs: {parsed_args}""" )
_A: int = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
_A: List[str] = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=a )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
_A: Dict = generate_summaries_or_translations(
a , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **a , )
if args.reference_path is None:
return {}
# Compute scores
_A: Dict = calculate_bleu if '''translation''' in args.task else calculate_rouge
_A: List[Any] = [x.rstrip() for x in open(args.save_path ).readlines()]
_A: Any = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(a )]
_A: dict = score_fn(a , a )
scores.update(a )
if args.dump_args:
scores.update(a )
if args.info:
_A: Optional[Any] = args.info
if verbose:
print(a )
if args.score_path is not None:
json.dump(a , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
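    # Hypothetical summarization invocation, assembled from the argparse help strings above:
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_summaries.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization --bs 16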
run_generate(verbose=True)
| 301
| 0
|
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase__ ( a , a = True , a = math.inf , a = -math.inf , a = math.inf , a = -math.inf , a = False , a = 1_00 , a = 0.01 , a = 1 , ) -> Any:
_A: Optional[Any] = False
_A: Dict = search_prob
_A: str = start_temperate
_A: Optional[int] = []
_A: int = 0
_A: Dict = None
while not search_end:
_A: Dict = current_state.score()
if best_state is None or current_score > best_state.score():
_A: List[Any] = current_state
scores.append(a )
iterations += 1
_A: List[str] = None
_A: str = current_state.get_neighbors()
while (
next_state is None and neighbors
        ): # until we find a neighbor we can move to
_A: Any = random.randint(0 , len(a ) - 1 ) # picking a random neighbor
_A: Union[str, Any] = neighbors.pop(a )
_A: List[str] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_A: Optional[Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_A: str = picked_neighbor
else:
_A: Tuple = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_A: Optional[int] = picked_neighbor
_A: Dict = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_A: Any = True
else:
_A: List[Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(a ) , a )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCamelCase__ ( a , a ) -> Optional[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
UpperCAmelCase__ : Optional[int] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : Optional[Any] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
UpperCAmelCase__ : Optional[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : List[str] = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase__ ( a , a ) -> Optional[Any]:
return (3 * x**2) - (6 * y)
UpperCAmelCase__ : Union[str, Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : List[str] = simulated_annealing(prob, find_max=False, visualization=True)
print(
        'The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
F"""{local_min.score()}"""
)
UpperCAmelCase__ : Optional[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : List[Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
        'The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
F"""{local_min.score()}"""
)
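# Illustrative sketch (not part of the module above): the Metropolis-style
# acceptance rule that simulated_annealing applies to each picked neighbor,
# shown in isolation. The helper name and signature are assumptions made for
# demonstration only.
import math
import random
def accepts_move(change: float, temperature: float) -> bool:
    if change > 0: # the move strictly improves the score: always accept
        return True
    # a worsening move is still accepted with probability e ** (change / temperature),
    # which decays towards zero as the temperature cools
    return random.random() < math.e ** (change / temperature)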
| 357
|
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase__ ( a , a = True , a = math.inf , a = -math.inf , a = math.inf , a = -math.inf , a = False , a = 1_00 , a = 0.01 , a = 1 , ) -> Any:
_A: Optional[Any] = False
_A: Dict = search_prob
_A: str = start_temperate
_A: Optional[int] = []
_A: int = 0
_A: Dict = None
while not search_end:
_A: Dict = current_state.score()
if best_state is None or current_score > best_state.score():
_A: List[Any] = current_state
scores.append(a )
iterations += 1
_A: List[str] = None
_A: str = current_state.get_neighbors()
while (
next_state is None and neighbors
        ): # until we find a neighbor we can move to
_A: Any = random.randint(0 , len(a ) - 1 ) # picking a random neighbor
_A: Union[str, Any] = neighbors.pop(a )
_A: List[str] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_A: Optional[Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_A: str = picked_neighbor
else:
_A: Tuple = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_A: Optional[int] = picked_neighbor
_A: Dict = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_A: Any = True
else:
_A: List[Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(a ) , a )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCamelCase__ ( a , a ) -> Optional[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
UpperCAmelCase__ : Optional[int] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : Optional[Any] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
UpperCAmelCase__ : Optional[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : List[str] = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase__ ( a , a ) -> Optional[Any]:
return (3 * x**2) - (6 * y)
UpperCAmelCase__ : Union[str, Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : List[str] = simulated_annealing(prob, find_max=False, visualization=True)
print(
        'The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
F"""{local_min.score()}"""
)
UpperCAmelCase__ : Optional[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : List[Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
        'The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
F"""{local_min.score()}"""
)
| 301
| 0
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Any = (DDPMParallelScheduler,)
def __magic_name__ ( self : Optional[int] , **lowerCAmelCase_ : Any ):
"""simple docstring"""
_A: Optional[int] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowerCAmelCase_ )
return config
def __magic_name__ ( self : int ):
"""simple docstring"""
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def __magic_name__ ( self : int ):
"""simple docstring"""
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=lowerCAmelCase_ )
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: List[Any] = self.scheduler_classes[0]
_A: Union[str, Any] = self.get_scheduler_config()
_A: Optional[Any] = scheduler_class(**lowerCAmelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Any = self.scheduler_classes[0]
_A: List[str] = self.get_scheduler_config()
_A: Union[str, Any] = scheduler_class(**lowerCAmelCase_ )
_A: List[Any] = len(lowerCAmelCase_ )
_A: Union[str, Any] = self.dummy_model()
_A: Dict = self.dummy_sample_deter
_A: Dict = self.dummy_sample_deter + 0.1
_A: str = self.dummy_sample_deter - 0.1
_A: str = samplea.shape[0]
_A: Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
_A: List[str] = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ )
_A: List[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_A: Optional[int] = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_A: Dict = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A: List[str] = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 1153.1833 ) < 1e-2
assert abs(result_mean.item() - 0.5005 ) < 1e-3
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Optional[Any] = self.scheduler_classes[0]
_A: List[Any] = self.get_scheduler_config()
_A: Any = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = len(lowerCAmelCase_ )
_A: Any = self.dummy_model()
_A: Optional[int] = self.dummy_sample_deter
_A: List[str] = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_A: Optional[int] = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_A: Optional[int] = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_A: List[Any] = pred_prev_sample
_A: Optional[int] = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A: Any = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Optional[int] = self.scheduler_classes[0]
_A: Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
_A: List[str] = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = len(lowerCAmelCase_ )
_A: Any = self.dummy_model()
_A: Any = self.dummy_sample_deter
_A: str = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_A: Any = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_A: int = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_A: Tuple = pred_prev_sample
_A: List[Any] = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A: str = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Optional[int] = self.scheduler_classes[0]
_A: Optional[Any] = self.get_scheduler_config()
_A: Dict = scheduler_class(**lowerCAmelCase_ )
_A: Any = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
_A: Tuple = scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase_ ):
if i == len(lowerCAmelCase_ ) - 1:
_A: Dict = -1
else:
_A: int = timesteps[i + 1]
_A: List[str] = scheduler.previous_timestep(lowerCAmelCase_ )
_A: str = prev_t.item()
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Tuple = self.scheduler_classes[0]
_A: int = self.get_scheduler_config()
_A: Any = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(lowerCAmelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: List[str] = self.scheduler_classes[0]
_A: Optional[Any] = self.get_scheduler_config()
_A: Union[str, Any] = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = [1_0_0, 8_7, 5_0, 1, 0]
_A: Dict = len(lowerCAmelCase_ )
with self.assertRaises(lowerCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: List[Any] = self.scheduler_classes[0]
_A: int = self.get_scheduler_config()
_A: str = scheduler_class(**lowerCAmelCase_ )
_A: Any = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowerCAmelCase_ , msg=F"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
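# Minimal usage sketch of the batched API exercised above (assumes diffusers and
# torch are installed; shapes and values are illustrative). batch_step_no_noise
# denoises several candidate samples, each at its own timestep, in a single call.
import torch
from diffusers import DDPMParallelScheduler
scheduler = DDPMParallelScheduler(num_train_timesteps=1_000)
sample = torch.randn(3, 4, 8, 8) # three candidate latents
timesteps = torch.tensor([999, 998, 997]) # one timestep per candidate
noise_pred = torch.randn_like(sample) # stand-in for a model output
prev = scheduler.batch_step_no_noise(noise_pred, timesteps, sample)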
| 358
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[Any] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase__ : Tuple = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase__ : Optional[int] = {'facebook/blenderbot_small-90M': 512}
def lowerCamelCase__ ( a ) -> Optional[Any]:
_A: List[Any] = set()
_A: List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A: List[Any] = char
_A: Union[str, Any] = set(a )
return pairs
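# Worked example: for the symbol tuple ("l", "o", "w", "</w>"), get_pairs
# returns {("l", "o"), ("o", "w"), ("w", "</w>")} -- the candidate bigrams
# that the BPE merge loop in the tokenizer below ranks and merges.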
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : str = VOCAB_FILES_NAMES
__UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str]="__start__" , lowerCAmelCase_ : Any="__end__" , lowerCAmelCase_ : Any="__unk__" , lowerCAmelCase_ : Any="__null__" , **lowerCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding='''utf-8''' ) as vocab_handle:
_A: Optional[int] = json.load(lowerCAmelCase_ )
_A: int = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding='''utf-8''' ) as merges_handle:
_A: Dict = merges_handle.read().split('''\n''' )[1:-1]
_A: int = [tuple(merge.split() ) for merge in merges]
_A: Dict = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A: Union[str, Any] = {}
@property
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
return len(self.encoder )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ ( self : str , lowerCAmelCase_ : str ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_A: List[Any] = re.sub('''([.,!?()])''' , R''' \1''' , lowerCAmelCase_ )
_A: List[Any] = re.sub('''(\')''' , R''' \1 ''' , lowerCAmelCase_ )
_A: List[Any] = re.sub(R'''\s{2,}''' , ''' ''' , lowerCAmelCase_ )
if "\n" in token:
_A: Dict = token.replace('''\n''' , ''' __newln__''' )
_A: Any = token.split(''' ''' )
_A: Optional[Any] = []
for token in tokens:
if not len(lowerCAmelCase_ ):
continue
_A: str = token.lower()
_A: List[str] = tuple(lowerCAmelCase_ )
_A: str = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
_A: Dict = get_pairs(lowerCAmelCase_ )
if not pairs:
words.append(lowerCAmelCase_ )
continue
while True:
_A: str = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A: Optional[int] = bigram
_A: str = []
_A: Dict = 0
while i < len(lowerCAmelCase_ ):
try:
_A: List[Any] = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
new_word.extend(word[i:j] )
_A: Optional[int] = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A: Union[str, Any] = tuple(lowerCAmelCase_ )
_A: Tuple = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_A: Optional[int] = get_pairs(lowerCAmelCase_ )
_A: str = '''@@ '''.join(lowerCAmelCase_ )
_A: Tuple = word[:-4]
_A: List[Any] = word
words.append(lowerCAmelCase_ )
return " ".join(lowerCAmelCase_ )
def __magic_name__ ( self : str , lowerCAmelCase_ : str ):
"""simple docstring"""
_A: List[Any] = []
_A: List[Any] = re.findall(R'''\S+\n?''' , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(''' ''' ) ) )
return split_tokens
def __magic_name__ ( self : str , lowerCAmelCase_ : str ):
"""simple docstring"""
_A: List[str] = token.lower()
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self : int , lowerCAmelCase_ : int ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
_A: List[str] = ''' '''.join(lowerCAmelCase_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __magic_name__ ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A: Dict = os.path.join(
lowerCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_A: Any = os.path.join(
lowerCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '''\n''' )
_A: List[str] = 0
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
_A: Optional[int] = token_index
writer.write(''' '''.join(lowerCAmelCase_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
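# Minimal usage sketch (assumes transformers is installed; the checkpoint id is
# taken from the pretrained maps above, the input sentence is illustrative):
from transformers import BlenderbotSmallTokenizer
tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
print(tok.tokenize("sam is a great name. it means 'listener'."))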
| 301
| 0
|
import tensorflow as tf
from ...tf_utils import shape_list
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : int=False , **lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_A: Dict = vocab_size
_A: Union[str, Any] = d_embed
_A: Union[str, Any] = d_proj
_A: Optional[Any] = cutoffs + [vocab_size]
_A: Union[str, Any] = [0] + self.cutoffs
_A: Optional[Any] = div_val
_A: Optional[Any] = self.cutoffs[0]
_A: Optional[int] = len(self.cutoffs ) - 1
_A: Optional[int] = self.shortlist_size + self.n_clusters
_A: Optional[int] = keep_order
_A: List[Any] = []
_A: Optional[int] = []
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : int ):
"""simple docstring"""
if self.n_clusters > 0:
_A: Union[str, Any] = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=lowerCAmelCase_ , name='''cluster_weight''' )
_A: Dict = self.add_weight(
shape=(self.n_clusters,) , initializer='''zeros''' , trainable=lowerCAmelCase_ , name='''cluster_bias''' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
_A: Union[str, Any] = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=lowerCAmelCase_ , name=F"""out_projs_._{i}""" , )
self.out_projs.append(lowerCAmelCase_ )
else:
self.out_projs.append(lowerCAmelCase_ )
_A: Optional[int] = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=lowerCAmelCase_ , name=F"""out_layers_._{i}_._weight""" , )
_A: str = self.add_weight(
shape=(self.vocab_size,) , initializer='''zeros''' , trainable=lowerCAmelCase_ , name=F"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
_A: Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_A: str = self.d_embed // (self.div_val**i)
_A: Union[str, Any] = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=lowerCAmelCase_ , name=F"""out_projs_._{i}""" )
self.out_projs.append(lowerCAmelCase_ )
_A: str = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=lowerCAmelCase_ , name=F"""out_layers_._{i}_._weight""" , )
_A: int = self.add_weight(
shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=lowerCAmelCase_ , name=F"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
super().build(lowerCAmelCase_ )
@staticmethod
def __magic_name__ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict=None ):
"""simple docstring"""
_A: str = x
if proj is not None:
_A: List[str] = tf.einsum('''ibd,ed->ibe''' , lowerCAmelCase_ , lowerCAmelCase_ )
return tf.einsum('''ibd,nd->ibn''' , lowerCAmelCase_ , lowerCAmelCase_ ) + b
@staticmethod
def __magic_name__ ( lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: Optional[Any] = shape_list(lowerCAmelCase_ )
_A: str = tf.range(lp_size[0] , dtype=target.dtype )
_A: Optional[Any] = tf.stack([r, target] , 1 )
return tf.gather_nd(lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]=False ):
"""simple docstring"""
_A: str = 0
if self.n_clusters == 0:
_A: Any = self._logit(lowerCAmelCase_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
_A: Tuple = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCAmelCase_ , logits=lowerCAmelCase_ )
_A: str = tf.nn.log_softmax(lowerCAmelCase_ , axis=-1 )
else:
_A: Optional[int] = shape_list(lowerCAmelCase_ )
_A: str = []
_A: int = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
_A: str = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
_A: List[Any] = (target >= l_idx) & (target < r_idx)
_A: Any = tf.where(lowerCAmelCase_ )
_A: Dict = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ ) - l_idx
if self.div_val == 1:
_A: str = self.out_layers[0][0][l_idx:r_idx]
_A: Union[str, Any] = self.out_layers[0][1][l_idx:r_idx]
else:
_A: Dict = self.out_layers[i][0]
_A: Union[str, Any] = self.out_layers[i][1]
if i == 0:
_A: Union[str, Any] = tf.concat([cur_W, self.cluster_weight] , 0 )
_A: Dict = tf.concat([cur_b, self.cluster_bias] , 0 )
_A: Optional[int] = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[0] )
_A: Union[str, Any] = tf.nn.log_softmax(lowerCAmelCase_ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
_A: List[str] = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_A: str = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
else:
_A: Union[str, Any] = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[i] )
_A: Union[str, Any] = tf.nn.log_softmax(lowerCAmelCase_ )
_A: Dict = self.cutoffs[0] + i - 1 # No probability for the head cluster
_A: Tuple = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowerCAmelCase_ )
if target is not None:
_A: Tuple = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_A: Dict = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_A: Optional[int] = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowerCAmelCase_ , -cur_logprob , shape_list(lowerCAmelCase_ ) )
_A: Optional[int] = tf.concat(lowerCAmelCase_ , axis=-1 )
if target is not None:
if return_mean:
_A: Optional[Any] = tf.reduce_mean(lowerCAmelCase_ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowerCAmelCase_ )
# Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(lowerCAmelCase_ , name=self.name , aggregation='''mean''' if return_mean else '''''' )
return out
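# Shape sketch of the _logit einsums above (illustrative): with hidden states of
# shape (seq, batch, d_proj), an optional projection of shape (d_embed, d_proj)
# maps them back into embedding space via "ibd,ed->ibe", and a weight of shape
# (vocab, d_embed) plus its bias then yield logits of shape (seq, batch, vocab)
# via "ibd,nd->ibn".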
| 359
|
import os
from pathlib import Path
def lowerCamelCase__ ( ) -> Optional[Any]:
from torch.utils.cpp_extension import load
_A: str = Path(a ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
_A: Tuple = [
root / filename
for filename in [
'''vision.cpp''',
os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
]
]
load(
'''MultiScaleDeformableAttention''' , a , with_cuda=a , extra_include_paths=[str(a )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
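# Usage note (illustrative): torch.utils.cpp_extension.load JIT-compiles the
# listed sources on the first call and caches the built extension, so only the
# first invocation pays the compilation cost, e.g.:
# MSDA = load_cuda_kernels() # hypothetical call site; requires a CUDA toolchain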
| 301
| 0
|
import sys
import turtle
def lowerCamelCase__ ( a , a ) -> tuple[float, float]:
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def lowerCamelCase__ ( a , a , a , a , ) -> None:
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
triangle(a , get_mid(a , a ) , get_mid(a , a ) , depth - 1 )
triangle(a , get_mid(a , a ) , get_mid(a , a ) , depth - 1 )
triangle(a , get_mid(a , a ) , get_mid(a , a ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
UpperCAmelCase__ : Union[str, Any] = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
UpperCAmelCase__ : Union[str, Any] = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
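# Worked note: each call to triangle() recurses into three half-size copies, so
# a depth of d ultimately draws 3**d smallest triangles -- e.g. depth 5 already
# means 3**5 = 243 of them.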
| 360
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = ['''image_processor''', '''tokenizer''']
__UpperCamelCase : Optional[Any] = '''BlipImageProcessor'''
__UpperCamelCase : int = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
_A: Optional[Any] = False
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
_A: List[Any] = self.image_processor
def __call__( self : Optional[Any] , lowerCAmelCase_ : ImageInput = None , lowerCAmelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase_ : Union[bool, str, TruncationStrategy] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
_A: Tuple = self.tokenizer
_A: Optional[int] = self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
return text_encoding
# add pixel_values
_A: List[Any] = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
if text is not None:
_A: Tuple = self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
else:
_A: str = None
if text_encoding is not None:
encoding_image_processor.update(lowerCAmelCase_ )
return encoding_image_processor
def __magic_name__ ( self : Optional[Any] , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Tuple ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __magic_name__ ( self : Union[str, Any] , *lowerCAmelCase_ : int , **lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: Dict = self.tokenizer.model_input_names
_A: List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
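# Minimal usage sketch (assumes transformers and PIL are installed; the
# checkpoint id, image path, and prompt are assumptions for illustration):
# from PIL import Image
# from transformers import BlipProcessor
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")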
| 301
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
UpperCAmelCase__ : str = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : str = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
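# What the _LazyModule registration above buys (illustrative): importing the
# package stays cheap, and a heavy submodule is only imported when one of its
# attributes -- say the SpeechT5 processor -- is first accessed, not at
# package import time.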
| 361
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = '''mobilenet_v1'''
def __init__( self : Optional[int] , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : str=2_2_4 , lowerCAmelCase_ : List[str]=1.0 , lowerCAmelCase_ : Any=8 , lowerCAmelCase_ : Tuple="relu6" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[int]=0.999 , lowerCAmelCase_ : List[str]=0.02 , lowerCAmelCase_ : List[Any]=0.001 , **lowerCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
_A: Any = num_channels
_A: Optional[int] = image_size
_A: Optional[Any] = depth_multiplier
_A: Tuple = min_depth
_A: Any = hidden_act
_A: Dict = tf_padding
_A: List[Any] = classifier_dropout_prob
_A: Tuple = initializer_range
_A: Tuple = layer_norm_eps
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Dict = version.parse('''1.11''' )
@property
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def __magic_name__ ( self : Dict ):
"""simple docstring"""
return 1e-4
| 301
| 0
|
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ : int = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
UpperCAmelCase__ : int = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def lowerCamelCase__ ( a , a ) -> int:
_A: Union[str, Any] = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
_A: Tuple = int(re.match(R'''.*layer_(\d*).*''' , a )[1] )
layer_number -= 3
return f"""h.{layer_number}.""" + key
def lowerCamelCase__ ( a ) -> Optional[Any]:
if dtype == torch.bool:
return 1 / 8
_A: Tuple = re.search(R'''[^\d](\d+)$''' , str(a ) )
if bit_search is None:
raise ValueError(f"""`dtype` is not a valid dtype: {dtype}.""" )
_A: Any = int(bit_search.groups()[0] )
return bit_size // 8
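# Worked example: str(torch.float16) ends in "16", so bit_size is 16 and the
# function returns 16 // 8 = 2 bytes per element; torch.bool is special-cased
# to 1/8 of a byte above.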
def lowerCamelCase__ ( a , a , a , a , a ) -> Tuple:
# Construct model
if bloom_config_file == "":
_A: Union[str, Any] = BloomConfig()
else:
_A: Optional[Any] = BloomConfig.from_json_file(a )
if shard_model:
_A: Dict = os.listdir(a )
        _A: Optional[Any] = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , a ) )
_A: Tuple = {'''weight_map''': {}, '''metadata''': {}}
_A: Optional[Any] = 0
_A: int = None
_A: str = BloomConfig()
for j, file in enumerate(a ):
print('''Processing file: {}'''.format(a ) )
_A: Tuple = None
for i in range(a ):
# load all TP files
_A: str = file.replace('''model_00''' , f"""model_0{i}""" )
_A: str = torch.load(os.path.join(a , a ) , map_location='''cpu''' )
# Rename keys in the transformers names
_A: str = list(temp.keys() )
for key in keys:
_A: str = temp.pop(a )
if tensors is None:
_A: int = temp
else:
for key in tensors.keys():
if any(key.endswith(a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
_A: Union[str, Any] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
_A: int = torch.cat([tensors[key], temp[key]] , dim=a )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
_A: str = tensors[key] / pretraining_tp
torch.save(
a , os.path.join(
a , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(a ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
_A: Dict = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
_A: Union[str, Any] = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(a ) ).zfill(5 ) )
_A: Union[str, Any] = BloomConfig()
_A: str = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
_A: Optional[int] = total_size
with open(a , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(a , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
_A: Optional[Any] = json.dumps(a , indent=2 , sort_keys=a ) + '''\n'''
f.write(a )
else:
_A: int = BloomModel(a )
_A: Tuple = os.listdir(a )
        _A: str = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , a ) )
_A: Optional[Any] = None
for i, file in enumerate(a ):
_A: str = None
for i in range(a ):
# load all TP files
_A: Tuple = file.replace('''model_00''' , f"""model_0{i}""" )
_A: List[Any] = torch.load(os.path.join(a , a ) , map_location='''cpu''' )
# Rename keys in the transformers names
_A: Tuple = list(temp.keys() )
for key in keys:
_A: int = temp.pop(a )
if tensors is None:
_A: Union[str, Any] = temp
else:
for key in tensors.keys():
                    # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
_A: Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
_A: Optional[int] = torch.cat([tensors[key], temp[key]] , dim=a )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
_A: List[Any] = tensors[key] / pretraining_tp
_A: str = model.load_state_dict(a , strict=a )
assert not other_keys.unexpected_keys, f"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
_A: Union[str, Any] = set(other_keys.missing_keys )
else:
_A: Any = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(a , exist_ok=a )
_A: Optional[Any] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
_A: Optional[int] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
_A: List[str] = model.to(config.torch_dtype )
torch.save(model.state_dict() , a )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(a , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCAmelCase__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 362
|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase__ : Any = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCAmelCase__ : Optional[Any] = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def lowerCamelCase__ ( a , a , a ) -> Union[str, Any]:
_A: Optional[int] = SavedModel()
_A: int = []
with open(os.path.join(a , '''utils''' , '''tf_ops''' , '''onnx.json''' ) ) as f:
_A: List[Any] = json.load(a )['''opsets''']
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(a )] )
with open(a , '''rb''' ) as f:
saved_model.ParseFromString(f.read() )
_A: Optional[Any] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
_A: Optional[int] = sorted(a )
_A: Tuple = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(a )
if strict and len(a ) > 0:
raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops )
elif len(a ) > 0:
print(f"""Found the following incompatible ops for the opset {opset}:""" )
print(*a , sep='''\n''' )
else:
print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
UpperCAmelCase__ : int = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 301
| 0
|
"""simple docstring"""
def lowerCamelCase__ ( a ) -> int:
_A: list[list[int]] = [[0 for _ in range(a )] for _ in range(m + 1 )]
for i in range(m + 1 ):
_A: Optional[Any] = 1
for n in range(m + 1 ):
for k in range(1 , a ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
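# Worked example: partition(4) evaluates to 5, matching the five integer
# partitions of 4: [4], [3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1].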
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase__ : Any = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
UpperCAmelCase__ : Any = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 363
|
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : int = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
UpperCAmelCase__ : str = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
UpperCAmelCase__ : Dict = {
'ctrl': 256,
}
UpperCAmelCase__ : Any = {
'Pregnancy': 168629,
'Christianity': 7675,
'Explain': 106423,
'Fitness': 63440,
'Saving': 63163,
'Ask': 27171,
'Ass': 95985,
'Joke': 163509,
'Questions': 45622,
'Thoughts': 49605,
'Retail': 52342,
'Feminism': 164338,
'Writing': 11992,
'Atheism': 192263,
'Netflix': 48616,
'Computing': 39639,
'Opinion': 43213,
'Alone': 44967,
'Funny': 58917,
'Gaming': 40358,
'Human': 4088,
'India': 1331,
'Joker': 77138,
'Diet': 36206,
'Legal': 11859,
'Norman': 4939,
'Tip': 72689,
'Weight': 52343,
'Movies': 46273,
'Running': 23425,
'Science': 2090,
'Horror': 37793,
'Confession': 60572,
'Finance': 12250,
'Politics': 16360,
'Scary': 191985,
'Support': 12654,
'Technologies': 32516,
'Teenage': 66160,
'Event': 32769,
'Learned': 67460,
'Notion': 182770,
'Wikipedia': 37583,
'Books': 6665,
'Extract': 76050,
'Confessions': 102701,
'Conspiracy': 75932,
'Links': 63674,
'Narcissus': 150425,
'Relationship': 54766,
'Relationships': 134796,
'Reviews': 41671,
'News': 4256,
'Translation': 26820,
'multilingual': 128406,
}
def lowerCamelCase__ ( a ) -> Optional[Any]:
_A: Optional[int] = set()
_A: Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A: Any = char
_A: Dict = set(a )
return pairs
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Any = VOCAB_FILES_NAMES
__UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Optional[int] = CONTROL_CODES
def __init__( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any]="<unk>" , **lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
super().__init__(unk_token=lowerCAmelCase_ , **lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding='''utf-8''' ) as vocab_handle:
_A: str = json.load(lowerCAmelCase_ )
_A: List[Any] = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding='''utf-8''' ) as merges_handle:
_A: int = merges_handle.read().split('''\n''' )[1:-1]
_A: List[Any] = [tuple(merge.split() ) for merge in merges]
_A: List[str] = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A: Union[str, Any] = {}
@property
def __magic_name__ ( self : Any ):
"""simple docstring"""
return len(self.encoder )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_A: List[Any] = tuple(lowerCAmelCase_ )
_A: Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
_A: Optional[int] = get_pairs(lowerCAmelCase_ )
if not pairs:
return token
while True:
_A: Optional[int] = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A: Any = bigram
_A: int = []
_A: int = 0
while i < len(lowerCAmelCase_ ):
try:
_A: Any = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_A: Optional[int] = j
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A: Dict = tuple(lowerCAmelCase_ )
_A: Union[str, Any] = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_A: Tuple = get_pairs(lowerCAmelCase_ )
_A: Optional[int] = '''@@ '''.join(lowerCAmelCase_ )
_A: List[str] = word[:-4]
_A: Optional[Any] = word
return word
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
_A: List[Any] = []
_A: List[str] = re.findall(R'''\S+\n?''' , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(''' ''' ) ) )
return split_tokens
def __magic_name__ ( self : Dict , lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self : Dict , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def __magic_name__ ( self : Any , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
_A: Any = ''' '''.join(lowerCAmelCase_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A: List[str] = os.path.join(
lowerCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_A: List[Any] = os.path.join(
lowerCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '''\n''' )
_A: str = 0
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
_A: Tuple = token_index
writer.write(''' '''.join(lowerCAmelCase_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 301
| 0
|
from math import pi, sqrt
def lowerCamelCase__ ( a ) -> float:
if num <= 0:
raise ValueError('''math domain error''' )
if num > 171.5:
raise OverflowError('''math range error''' )
elif num - int(a ) not in (0, 0.5):
raise NotImplementedError('''num must be an integer or a half-integer''' )
elif num == 0.5:
return sqrt(a )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
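# Worked note: for positive integers this reduces to gamma(n) == (n - 1)!, so
# gamma(5) evaluates to 4 * 3 * 2 * 1 = 24, while half-integers bottom out at
# gamma(0.5) == sqrt(pi).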
def lowerCamelCase__ ( ) -> None:
assert gamma(0.5 ) == sqrt(a )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase__ : Dict = 1.0
while num:
UpperCAmelCase__ : Optional[Any] = float(input('Gamma of: '))
print(F"""gamma({num}) = {gamma(num)}""")
print('\nEnter 0 to exit...')
| 364
|
def lowerCamelCase__ ( a = 10 ) -> str:
if not isinstance(a , a ) or n < 0:
raise ValueError('''Invalid input''' )
_A: int = 10**n
    _A: List[Any] = 28_433 * (pow(2 , 7_830_457 , a )) + 1
return str(number % modulus )
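# Worked note: pow(2, 7_830_457, modulus) is modular exponentiation, so only
# the last n digits are ever held in memory -- the full value of
# 28433 * 2**7830457 + 1 runs to over 2.3 million digits and is never built.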
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
| 301
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ : List[str] = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ['LongformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[Any] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
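# How the lazy init works (editor's note): replacing this module's entry in
# sys.modules with a _LazyModule means that, e.g.,
#   from transformers import LongformerModel
# only performs the heavy torch-backed import when the attribute is first
# resolved, keeping `import transformers` itself cheap.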
| 365
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class UpperCAmelCase :
'''simple docstring'''
__UpperCamelCase : Any = MBartConfig
__UpperCamelCase : Tuple = {}
__UpperCamelCase : Dict = '''gelu'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
"""simple docstring"""
_A: Union[str, Any] = parent
_A: List[Any] = batch_size
_A: Dict = seq_length
_A: Dict = is_training
_A: str = use_labels
_A: int = vocab_size
_A: str = hidden_size
_A: Tuple = num_hidden_layers
_A: Optional[Any] = num_attention_heads
_A: Tuple = intermediate_size
_A: int = hidden_dropout_prob
_A: Tuple = attention_probs_dropout_prob
_A: Tuple = max_position_embeddings
_A: Dict = eos_token_id
_A: int = pad_token_id
_A: Any = bos_token_id
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_A: Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_A: List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
_A: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A: int = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_A: Any = prepare_mbart_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return config, inputs_dict
def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
_A: Tuple = TFMBartModel(config=lowerCAmelCase_ ).get_decoder()
_A: List[str] = inputs_dict['''input_ids''']
_A: Tuple = input_ids[:1, :]
_A: List[Any] = inputs_dict['''attention_mask'''][:1, :]
_A: str = inputs_dict['''head_mask''']
_A: Optional[Any] = 1
# first forward pass
_A: Any = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
_A , _A: List[str] = outputs.to_tuple()
_A: Dict = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
__UpperCamelCase : int = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
__UpperCamelCase : Tuple = (
{
'''conversational''': TFMBartForConditionalGeneration,
'''feature-extraction''': TFMBartModel,
'''summarization''': TFMBartForConditionalGeneration,
'''text2text-generation''': TFMBartForConditionalGeneration,
'''translation''': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCamelCase : List[Any] = True
__UpperCamelCase : int = False
__UpperCamelCase : Optional[Any] = False
def __magic_name__ ( self : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int ):
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Dict = TFMBartModelTester(self )
_A: Tuple = ConfigTester(self , config_class=lowerCAmelCase_ )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = [
''' UN Chief Says There Is No Military Solution in Syria''',
]
__UpperCamelCase : List[str] = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
]
__UpperCamelCase : Union[str, Any] = '''facebook/mbart-large-en-ro'''
@cached_property
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __magic_name__ ( self : str ):
"""simple docstring"""
_A: Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __magic_name__ ( self : Union[str, Any] , **lowerCAmelCase_ : Tuple ):
"""simple docstring"""
_A: Optional[Any] = self.translate_src_text(**lowerCAmelCase_ )
self.assertListEqual(self.expected_text , lowerCAmelCase_ )
def __magic_name__ ( self : Dict , **lowerCAmelCase_ : Tuple ):
"""simple docstring"""
_A: Any = self.tokenizer(self.src_text , **lowerCAmelCase_ , return_tensors='''tf''' )
_A: Any = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
_A: Optional[Any] = self.tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
return generated_words
@slow
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 301
| 0
|
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load
    root = Path(__file__).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
    src_files = [
        root / filename
        for filename in [
            '''vision.cpp''',
            os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
            os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
        ]
    ]
    load(
        '''MultiScaleDeformableAttention''' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
            '''-DCUDA_HAS_FP16=1''',
            '''-D__CUDA_NO_HALF_OPERATORS__''',
            '''-D__CUDA_NO_HALF_CONVERSIONS__''',
            '''-D__CUDA_NO_HALF2_OPERATORS__''',
        ] , )
    import MultiScaleDeformableAttention as MSDA
    return MSDA
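# Usage sketch (editor's addition; assumes a working CUDA toolchain with nvcc
# on PATH). The first call JIT-compiles the extension; later calls reuse the
# cached build:
#   MSDA = load_cuda_kernels()
# The attribute names exposed by the compiled MSDA module are defined by the
# kernel sources above and are not spelled out here.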
| 366
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCAmelCase__ : Tuple = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 301
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Tuple = KandinskyInpaintPipeline
__UpperCamelCase : Union[str, Any] = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
__UpperCamelCase : List[Any] = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
__UpperCamelCase : Dict = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__UpperCamelCase : List[str] = False
@property
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
return 3_2
@property
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
return 3_2
@property
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
return self.time_input_dim
@property
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
return 1_0_0
@property
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: Tuple = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_A: Union[str, Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
_A: int = MultilingualCLIP(lowerCAmelCase_ )
_A: Tuple = text_encoder.eval()
return text_encoder
@property
def __magic_name__ ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
_A: Optional[Any] = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_A: Dict = UNetaDConditionModel(**lowerCAmelCase_ )
return model
@property
def __magic_name__ ( self : str ):
"""simple docstring"""
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_A: Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: List[str] = self.dummy_text_encoder
_A: Dict = self.dummy_tokenizer
_A: Union[str, Any] = self.dummy_unet
_A: Dict = self.dummy_movq
_A: int = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=lowerCAmelCase_ , )
_A: Optional[int] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple=0 ):
"""simple docstring"""
_A: Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
_A: Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCAmelCase_ )
# create init_image
_A: Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
_A: Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A: Optional[int] = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
# create mask
_A: Any = np.ones((6_4, 6_4) , dtype=np.floataa )
_A: str = 0
if str(lowerCAmelCase_ ).startswith('''mps''' ):
_A: Optional[Any] = torch.manual_seed(lowerCAmelCase_ )
else:
_A: Dict = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_A: Optional[int] = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: str = '''cpu'''
_A: int = self.get_dummy_components()
_A: List[Any] = self.pipeline_class(**lowerCAmelCase_ )
_A: Union[str, Any] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A: Optional[Any] = pipe(**self.get_dummy_inputs(lowerCAmelCase_ ) )
_A: Dict = output.images
_A: str = pipe(
**self.get_dummy_inputs(lowerCAmelCase_ ) , return_dict=lowerCAmelCase_ , )[0]
_A: Union[str, Any] = image[0, -3:, -3:, -1]
_A: Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 6_4, 6_4, 3)
_A: List[str] = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__ ( self : str ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
_A: List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_A: str = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
_A: Any = 0
_A: List[Any] = '''a hat'''
_A: Tuple = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(lowerCAmelCase_ )
_A: Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
_A: List[Any] = pipeline.to(lowerCAmelCase_ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase_ )
_A: Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
_A: Optional[Any] = pipe_prior(
lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_A: Optional[Any] = pipeline(
lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , image_embeds=lowerCAmelCase_ , negative_image_embeds=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='''np''' , )
_A: List[str] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
| 367
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Any = (DDPMParallelScheduler,)
def __magic_name__ ( self : Optional[int] , **lowerCAmelCase_ : Any ):
"""simple docstring"""
_A: Optional[int] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowerCAmelCase_ )
return config
def __magic_name__ ( self : int ):
"""simple docstring"""
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def __magic_name__ ( self : int ):
"""simple docstring"""
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=lowerCAmelCase_ )
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: List[Any] = self.scheduler_classes[0]
_A: Union[str, Any] = self.get_scheduler_config()
_A: Optional[Any] = scheduler_class(**lowerCAmelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Any = self.scheduler_classes[0]
_A: List[str] = self.get_scheduler_config()
_A: Union[str, Any] = scheduler_class(**lowerCAmelCase_ )
_A: List[Any] = len(lowerCAmelCase_ )
_A: Union[str, Any] = self.dummy_model()
_A: Dict = self.dummy_sample_deter
_A: Dict = self.dummy_sample_deter + 0.1
_A: str = self.dummy_sample_deter - 0.1
_A: str = samplea.shape[0]
_A: Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
_A: List[str] = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ )
_A: List[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_A: Optional[int] = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_A: Dict = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A: List[str] = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 1153.1833 ) < 1e-2
assert abs(result_mean.item() - 0.5005 ) < 1e-3
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Optional[Any] = self.scheduler_classes[0]
_A: List[Any] = self.get_scheduler_config()
_A: Any = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = len(lowerCAmelCase_ )
_A: Any = self.dummy_model()
_A: Optional[int] = self.dummy_sample_deter
_A: List[str] = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_A: Optional[int] = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_A: Optional[int] = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_A: List[Any] = pred_prev_sample
_A: Optional[int] = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A: Any = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Optional[int] = self.scheduler_classes[0]
_A: Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
_A: List[str] = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = len(lowerCAmelCase_ )
_A: Any = self.dummy_model()
_A: Any = self.dummy_sample_deter
_A: str = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_A: Any = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_A: int = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_A: Tuple = pred_prev_sample
_A: List[Any] = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A: str = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Optional[int] = self.scheduler_classes[0]
_A: Optional[Any] = self.get_scheduler_config()
_A: Dict = scheduler_class(**lowerCAmelCase_ )
_A: Any = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
_A: Tuple = scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase_ ):
if i == len(lowerCAmelCase_ ) - 1:
_A: Dict = -1
else:
_A: int = timesteps[i + 1]
_A: List[str] = scheduler.previous_timestep(lowerCAmelCase_ )
_A: str = prev_t.item()
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Tuple = self.scheduler_classes[0]
_A: int = self.get_scheduler_config()
_A: Any = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(lowerCAmelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: List[str] = self.scheduler_classes[0]
_A: Optional[Any] = self.get_scheduler_config()
_A: Union[str, Any] = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = [1_0_0, 8_7, 5_0, 1, 0]
_A: Dict = len(lowerCAmelCase_ )
with self.assertRaises(lowerCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: List[Any] = self.scheduler_classes[0]
_A: int = self.get_scheduler_config()
_A: str = scheduler_class(**lowerCAmelCase_ )
_A: Any = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowerCAmelCase_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
| 301
| 0
|
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
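    # Illustrative round trip (editor's addition; the sample values are made
    # up): X_L = 2 * pi * f * L, so solving for the reactance and feeding it
    # back in recovers the original inductance up to floating-point error.
    x_l = ind_reactance(35e-3, 1e3, 0)["reactance"]
    assert abs(ind_reactance(0, 1e3, x_l)["inductance"] - 35e-3) < 1e-12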
| 368
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Any = GPTSanJapaneseTokenizer
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : str = {'''do_clean_text''': False, '''add_prefix_space''': False}
def __magic_name__ ( self : Any ):
"""simple docstring"""
super().setUp()
# fmt: off
_A: Union[str, Any] = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
_A: Union[str, Any] = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
_A: str = {'''unk_token''': '''<unk>'''}
_A: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_A: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.emoji_file , '''w''' ) as emoji_writer:
emoji_writer.write(json.dumps(lowerCAmelCase_ ) )
def __magic_name__ ( self : Optional[int] , **lowerCAmelCase_ : List[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
_A: Optional[Any] = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
_A: str = '''こんにちは、世界。 \nこんばんは、世界。😀'''
return input_text, output_text
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
_A , _A: Optional[int] = self.get_input_output_texts(lowerCAmelCase_ )
_A: Union[str, Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_A: Tuple = tokenizer.decode(lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
return text, ids
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
pass # TODO add if relevant
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
pass # TODO add if relevant
def __magic_name__ ( self : Dict ):
"""simple docstring"""
pass # TODO add if relevant
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
_A: List[str] = self.get_tokenizer()
# Testing tokenization
_A: List[Any] = '''こんにちは、世界。 こんばんは、㔺界。'''
_A: Dict = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
_A: List[Any] = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing conversion to ids without special tokens
_A: Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_A: Optional[int] = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing conversion to ids with special tokens
_A: Dict = tokens + [tokenizer.unk_token]
_A: str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
_A: Optional[int] = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Dict = self.get_tokenizer()
# Testing tokenization
_A: Optional[int] = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
_A: str = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
_A: Tuple = tokenizer.encode(lowerCAmelCase_ )
_A: List[str] = tokenizer.decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: List[Any] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
_A: Union[str, Any] = '''こんにちは、世界。'''
_A: Optional[int] = '''こんばんは、㔺界。😀'''
_A: str = '''こんにちは、世界。こんばんは、世界。😀'''
_A: List[Any] = tokenizer.encode(prefix_text + input_text )
_A: Optional[Any] = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
_A: List[Any] = tokenizer.encode(lowerCAmelCase_ , prefix_text=lowerCAmelCase_ )
_A: Union[str, Any] = tokenizer.decode(lowerCAmelCase_ )
_A: Any = tokenizer.decode(lowerCAmelCase_ )
_A: Dict = tokenizer.decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: str = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
_A: Optional[int] = '''こんにちは、世界。'''
_A: Optional[int] = '''こんばんは、㔺界。😀'''
_A: Any = len(tokenizer.encode(lowerCAmelCase_ ) ) - 2
_A: int = len(tokenizer.encode(lowerCAmelCase_ ) ) - 2
_A: Optional[Any] = [1] + [0] * (len_prefix + len_text + 1)
_A: Any = [1] * (len_prefix + len_text + 1) + [0]
_A: Optional[int] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
_A: Optional[Any] = tokenizer(prefix_text + input_text ).token_type_ids
_A: List[str] = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
_A: Dict = tokenizer(lowerCAmelCase_ , prefix_text=lowerCAmelCase_ ).token_type_ids
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: str = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
_A: List[Any] = tokenizer.encode('''あンいワ''' )
_A: Any = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
_A: Union[str, Any] = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(lowerCAmelCase_ ) , tokenizer.decode(lowerCAmelCase_ ) )
self.assertEqual(tokenizer.decode(lowerCAmelCase_ ) , tokenizer.decode(lowerCAmelCase_ ) )
self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A: Tuple = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
_A: Optional[Any] = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
_A: Optional[int] = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_A: Optional[Any] = tokenizer.batch_encode_plus(lowerCAmelCase_ , padding=lowerCAmelCase_ )
# fmt: off
_A: Tuple = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
_A: Optional[int] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
_A: Dict = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , lowerCAmelCase_ )
self.assertListEqual(x_token.token_type_ids , lowerCAmelCase_ )
self.assertListEqual(x_token.attention_mask , lowerCAmelCase_ )
self.assertListEqual(x_token_a.input_ids , lowerCAmelCase_ )
self.assertListEqual(x_token_a.token_type_ids , lowerCAmelCase_ )
self.assertListEqual(x_token_a.attention_mask , lowerCAmelCase_ )
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
# tokenizer has no padding token
pass
| 301
| 0
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class UpperCAmelCase :
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ):
"""simple docstring"""
_A: str = parent
_A: Optional[Any] = batch_size
_A: Tuple = seq_length
_A: Union[str, Any] = is_training
_A: Any = use_input_mask
_A: Tuple = use_token_type_ids
_A: Optional[Any] = use_labels
_A: List[Any] = vocab_size
_A: Optional[int] = hidden_size
_A: str = num_hidden_layers
_A: int = num_attention_heads
_A: Dict = intermediate_size
_A: Optional[Any] = hidden_act
_A: List[str] = hidden_dropout_prob
_A: Optional[int] = attention_probs_dropout_prob
_A: Union[str, Any] = max_position_embeddings
_A: Any = type_vocab_size
_A: Any = type_sequence_label_size
_A: List[str] = initializer_range
_A: Optional[int] = num_labels
_A: int = num_choices
_A: Optional[Any] = scope
_A: Union[str, Any] = range_bbox
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
_A: str = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_A: Union[str, Any] = bbox[i, j, 3]
_A: Union[str, Any] = bbox[i, j, 1]
_A: Optional[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_A: Optional[Any] = bbox[i, j, 2]
_A: Any = bbox[i, j, 0]
_A: Union[str, Any] = t
_A: Dict = tf.convert_to_tensor(lowerCAmelCase_ )
_A: Any = None
if self.use_input_mask:
_A: Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_A: Any = None
if self.use_token_type_ids:
_A: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A: str = None
_A: str = None
_A: List[str] = None
if self.use_labels:
_A: str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A: List[str] = ids_tensor([self.batch_size] , self.num_choices )
_A: str = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
_A: Dict = TFLayoutLMModel(config=lowerCAmelCase_ )
_A: Union[str, Any] = model(lowerCAmelCase_ , lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
_A: List[Any] = model(lowerCAmelCase_ , lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
_A: List[str] = model(lowerCAmelCase_ , lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ ( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: Tuple = TFLayoutLMForMaskedLM(config=lowerCAmelCase_ )
_A: List[str] = model(lowerCAmelCase_ , lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: Tuple = self.num_labels
_A: Dict = TFLayoutLMForSequenceClassification(config=lowerCAmelCase_ )
_A: List[str] = model(lowerCAmelCase_ , lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
_A: List[str] = self.num_labels
_A: Dict = TFLayoutLMForTokenClassification(config=lowerCAmelCase_ )
_A: Union[str, Any] = model(lowerCAmelCase_ , lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
_A: List[Any] = TFLayoutLMForQuestionAnswering(config=lowerCAmelCase_ )
_A: Union[str, Any] = model(lowerCAmelCase_ , lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : str = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
__UpperCamelCase : Union[str, Any] = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCamelCase : Dict = False
__UpperCamelCase : Tuple = True
__UpperCamelCase : Optional[int] = 10
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: int = TFLayoutLMModelTester(self )
_A: List[str] = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=3_7 )
def __magic_name__ ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
_A: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ )
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ )
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ )
@slow
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A: List[str] = TFLayoutLMModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@unittest.skip('''Onnx compliancy broke with TF 2.10''' )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
pass
def prepare_layoutlm_batch_inputs():
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
_A: Tuple = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
_A: List[Any] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
_A: Union[str, Any] = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
_A: Any = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
_A: List[str] = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
_A: Optional[int] = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' )
_A: Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
_A: Union[str, Any] = model(input_ids=lowerCAmelCase_ , bbox=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
# test the sequence output on [0, :3, :3]
_A: Union[str, Any] = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase_ , atol=1e-3 ) )
# test the pooled output on [1, :3]
_A: List[str] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , lowerCAmelCase_ , atol=1e-3 ) )
@slow
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: List[str] = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2 )
_A: List[str] = prepare_layoutlm_batch_inputs()
# forward pass
_A: Optional[Any] = model(
input_ids=lowerCAmelCase_ , bbox=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
_A: str = outputs.loss
_A: Optional[Any] = (2,)
self.assertEqual(loss.shape , lowerCAmelCase_ )
# test the shape of the logits
_A: Tuple = outputs.logits
_A: List[Any] = (2, 2)
self.assertEqual(logits.shape , lowerCAmelCase_ )
@slow
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: Optional[int] = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=1_3 )
_A: Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
_A: List[Any] = model(
input_ids=lowerCAmelCase_ , bbox=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
# test the shape of the logits
_A: Any = outputs.logits
_A: Optional[Any] = tf.convert_to_tensor((2, 2_5, 1_3) )
self.assertEqual(logits.shape , lowerCAmelCase_ )
@slow
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Any = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' )
_A: Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
_A: Optional[Any] = model(input_ids=lowerCAmelCase_ , bbox=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
# test the shape of the logits
_A: List[str] = tf.convert_to_tensor((2, 2_5) )
self.assertEqual(outputs.start_logits.shape , lowerCAmelCase_ )
self.assertEqual(outputs.end_logits.shape , lowerCAmelCase_ )
| 369
|
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
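# Editor's note (hedged): this recurrence walks the almost-equilateral
# Heronian triangles (5, 5, 6), (17, 17, 16), (65, 65, 66), ... whose
# perimeters are 16, 50, 196, ...; for example solution(100) == 16 + 50 == 66.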
| 301
| 0
|
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
| 370
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase__ : Union[str, Any] = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 301
| 0
|
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    '''simple docstring'''
    def __lt__(self, other):
        """simple docstring"""
        return self[-1] < other[-1]
    def __eq__(self, other):
        """simple docstring"""
        return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element] )
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(patience_sort(unsorted))
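    # Small check (editor's addition): each pile holds a decreasing run, so
    # merging the reversed piles restores ascending order.
    assert patience_sort([3, 1, 2]) == [1, 2, 3]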
| 371
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ) -> torch.Tensor:
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta) )
    return torch.tensor(betas, dtype=torch.float32 )
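# Sanity sketch (editor's addition, left commented so the module stays
# side-effect free): the cosine schedule grows monotonically toward max_beta.
#   bs = betas_for_alpha_bar(1000)
#   assert bs[0] < bs[-1] <= 0.999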
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
__UpperCamelCase : Tuple = 2
@register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, prediction_type: str = "epsilon", use_karras_sigmas: Optional[bool] = False, clip_sample: Optional[bool] = False, clip_sample_range: float = 1.0, timestep_spacing: str = "linspace", steps_offset: int = 0, ):
        """simple docstring"""
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps, dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='''cosine''' )
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='''exp''' )
        else:
            raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""" )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps )
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None ):
        """simple docstring"""
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
    def init_noise_sigma( self : int ):
"""simple docstring"""
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input( self , sample: torch.FloatTensor , timestep: Union[float, torch.FloatTensor] , ):
        """simple docstring"""
        step_index = self.index_for_timestep(timestep )
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
return sample
    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None , num_train_timesteps: Optional[int] = None , ):
        """simple docstring"""
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0 , num_train_timesteps - 1 , num_inference_steps , dtype=float )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(float )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps , 0 , -step_ratio )).round().copy().astype(float )
            timesteps -= 1
        else:
            raise ValueError(
                F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        log_sigmas = np.log(sigmas )
        sigmas = np.interp(timesteps , np.arange(0 , len(sigmas ) ) , sigmas )
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas , num_inference_steps=self.num_inference_steps )
            timesteps = np.array([self._sigma_to_t(sigma , log_sigmas ) for sigma in sigmas] )
        sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
        sigmas = torch.from_numpy(sigmas ).to(device=device )
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
        timesteps = torch.from_numpy(timesteps )
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
        if str(device ).startswith('''mps''' ):
            # mps does not support float64
            self.timesteps = timesteps.to(device , dtype=torch.float32 )
        else:
            self.timesteps = timesteps.to(device=device )
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int )
    def _sigma_to_t( self , sigma , log_sigmas ):
        """simple docstring"""
        # get log sigma
        log_sigma = np.log(sigma )
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w , 0 , 1 )
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape )
return t
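    # Editor's worked example (not part of the original file): for a schedule with
    # sigmas = [0.1, 1.0, 10.0] (so log_sigmas is increasing), a query
    # sigma = 10 ** 0.5 lies at the geometric midpoint of 1.0 and 10.0, the
    # interpolation weight w is 0.5, and the method returns the fractional
    # timestep (1 - 0.5) * 1 + 0.5 * 2 = 1.5.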
    def _convert_to_karras( self , in_sigmas: torch.FloatTensor , num_inference_steps ):
        """simple docstring"""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0 , 1 , num_inference_steps )
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
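    # Editor's sketch (assumed to mirror Karras et al. 2022, Eq. 5; not part of the
    # original file) — a standalone NumPy equivalent of the rho=7 noise schedule:
    #     ramp = np.linspace(0, 1, num_steps)
    #     sigmas = (sigma_max ** (1 / 7.0)
    #               + ramp * (sigma_min ** (1 / 7.0) - sigma_max ** (1 / 7.0))) ** 7.0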
@property
    def state_in_first_order( self : Optional[Any] ):
"""simple docstring"""
return self.dt is None
    def step( self , model_output: Union[torch.FloatTensor, np.ndarray] , timestep: Union[float, torch.FloatTensor] , sample: Union[torch.FloatTensor, np.ndarray] , return_dict: bool = True , ):
        """simple docstring"""
        step_index = self.index_for_timestep(timestep )
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`""" )
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self , original_samples: torch.FloatTensor , noise: torch.FloatTensor , timesteps: torch.FloatTensor , ):
        """simple docstring"""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            timesteps = timesteps.to(original_samples.device , dtype=torch.float32 )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )
        step_indices = [self.index_for_timestep(t , schedule_timesteps ) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self : Dict ):
"""simple docstring"""
return self.config.num_train_timesteps
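# Editor's usage sketch (hypothetical driver code, not part of the original file);
# `unet` and `shape` are assumptions here, not names from this module:
#     scheduler.set_timesteps(50, device='cuda')
#     sample = torch.randn(shape) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t).sample
#         sample = scheduler.step(noise_pred, t, sample).prev_sample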
| 301
| 0
|
"""simple docstring"""
import math
def malus_law( initial_intensity: float , angle: float ) -> float:
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''' )
    # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
    # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
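# Editor's worked example (not part of the original file): at 45 degrees,
# cos(45°) ** 2 == 0.5, so exactly half of the incident intensity passes:
#     >>> round(malus_law(100.0, 45.0), 2)
#     50.0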
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 350
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class UpperCAmelCase ( TaskTemplate ):
    '''simple docstring'''
    task: str = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''audio''': Audio()} )
    label_schema: ClassVar[Features] = Features({'''transcription''': Value('''string''' )} )
    audio_column: str = "audio"
    transcription_column: str = "transcription"
    def align_with_features( self : List[Any] , features: Optional[Any] ):
        """simple docstring"""
        if self.audio_column not in features:
            raise ValueError(F"""Column {self.audio_column} is not present in features.""" )
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(F"""Column {self.audio_column} is not an Audio type.""" )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema['''audio'''] = features[self.audio_column]
        task_template.__dict__['''input_schema'''] = input_schema
return task_template
@property
    def column_mapping( self : str ):
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
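# Editor's usage note (not part of the original file): in the upstream `datasets`
# library this template is the `AutomaticSpeechRecognition` task; calling
# align_with_features(dataset.features) returns a copy of the template whose
# input schema carries that dataset's own Audio feature, e.g.
# Audio(sampling_rate=16_000).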
| 301
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roc_bert'] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 351
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True )
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
        qar_model = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
        qar_model = qar_model.eval()
    else:
        qar_tokenizer , qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
        save_dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
        sas_model.load_state_dict(save_dict['''model'''] )
        sas_model = sas_model.eval()
    else:
        sas_tokenizer , sas_model = make_qa_sas_model(
            model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
        wiki40b_passage_reps = np.memmap(
            '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wiki40b_passages.num_rows, 1_28) , )
        wiki40b_index_flat = faiss.IndexFlatIP(1_28 )
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wiki40b_index_flat )
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps )  # TODO fix for larger GPU
    else:
        wiki40b_passages , wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data():
    eli5 = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
    eli5_train = eli5['''train_eli5''']
    eli5_train_q_reps = np.memmap(
        '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(eli5_train.num_rows, 1_28) )
    eli5_train_q_index = faiss.IndexFlatIP(1_28 )
    eli5_train_q_index.add(eli5_train_q_reps )
    return (eli5_train, eli5_train_q_index)
wiki40b_passages ,wiki40b_gpu_index_flat ,es_client = load_indexes()
qar_tokenizer ,qar_model ,sas_tokenizer ,sas_model = load_models()
eli5_train ,eli5_train_q_index = load_train_data()
def find_nearest_training( question , n_results=10 ):
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D , I = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [eli5_train[int(i )] for i in I[0]]
    return nn_examples
def lowerCamelCase__ ( a , a="wiki40b" , a="dense" , a=10 ) -> str:
if source == "none":
_A , _A: Any = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_A , _A: List[Any] = query_qa_dense_index(
a , a , a , a , a , a )
else:
_A , _A: Tuple = query_es_index(
a , a , index_name='''english_wiki40b_snippets_100w''' , n_results=a , )
_A: Union[str, Any] = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_A: str = '''question: {} context: {}'''.format(a , a )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda x : None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda x : None),
    } )
def answer_question( question_doc , sas_model , sas_tokenizer , min_len=64 , max_len=2_56 , sampling=False , n_beams=2 , top_p=0.95 , temp=0.8 ):
    with torch.no_grad():
        answer = qa_sas_generate(
            sas_tokenizer , sas_model , question_doc , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=10_24 , device='''cuda:0''' , )[0]
    return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
header_html = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
header_full = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
demo_options = st.sidebar.checkbox('Demo options')
if demo_options:
    action_st = st.sidebar.selectbox(
'',
action_list,
index=3,
)
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
    show_passages = show_type == 'Show full text of passages'
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
    retriever_info = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
    index_type = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
    wiki_source = 'wiki40b'
    index_type = 'dense'
sampled = 'beam'
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('Generation options')
if generate_options:
    generate_info = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
    min_len = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
    max_len = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
        n_beams = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
        top_p = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
        temp = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
        n_beams = None
# start main text
questions_list = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('Enter your question here:', '')
else:
    question = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
            _ , support_list_dense = make_support(question, source=wiki_source, method='dense', n_results=10)
            _ , support_list_sparse = make_support(question, source=wiki_source, method='sparse', n_results=10)
            support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
        else:
            question_doc , support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer , support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
            wiki_url = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
            sec_titles = res[1].strip()
if sec_titles == "":
                sections = '[{}]({})'.format(res[0], wiki_url)
else:
                sec_list = sec_titles.split(' & ')
                sections = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
        answers_st = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 301
| 0
|
"""simple docstring"""
import operator as op
def solve( post_fix ) -> int:
    stack = []
    div = lambda x , y : int(x / y )  # noqa: E731 integer division operation
    opr = {
        '''^''': op.pow,
        '''*''': op.mul,
        '''/''': div,
        '''+''': op.add,
        '''-''': op.sub,
    }  # operators & their respective operation
    # print table header
    print('''Symbol'''.center(8 ) , '''Action'''.center(12 ) , '''Stack''' , sep=''' | ''' )
    print('''-''' * (30 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ('''push(''' + x + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8 ) , ('''pop(''' + b + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8 ) , ('''pop(''' + a + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ('''push(''' + a + x + b + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' , )
    return int(stack[0] )
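# Editor's worked example (not part of the original file): for the postfix input
# "5 6 9 * +", 6 and 9 are popped and multiplied (54), then 5 is added, so
# solve(['5', '6', '9', '*', '+']) prints the trace table and returns 59.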
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
| 352
|
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe( grid: Matrix , row: int , column: int , n: int ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location( grid: Matrix ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku( grid: Matrix ) -> Matrix | None:
    if location := find_empty_location(grid ):
        row , column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
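# Editor's note (not part of the original file): this is plain backtracking —
# each empty cell tries the digits 1-9, recursing on any safe digit and
# resetting the cell to 0 on failure, so the worst case is O(9**m) for m empty
# cells.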
def print_solution( grid: Matrix ) -> None:
    for row in grid:
        for cell in row:
            print(cell , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 301
| 0
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'emoji': True,
},
}
]
total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, 'r') as f:
        for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = F"""{line["duration"]:.4f}"""
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ''
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
for test in failed_tests:
                data = test[0].split('::')
                data[0] = data[0].split('/')[-1]
if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
    err = 'Too many failed tests, please see the full report in the Action results.'
    offset = len(err) + 10
    message = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
    message = 'No failed tests! 🤗'
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
        md_report = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
        action_button = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
        date_report = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
    response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
    ts = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
            test_class = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
                    test_class = row[0]
else:
                    row[0] = ''
            payload = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
| 353
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
UpperCAmelCase__ : str = open # noqa: we just need to have a builtin inside this module to test it properly
| 301
| 0
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: Optional[Any] = '''<pad>'''
_A: str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ )
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
_A: List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(lowerCAmelCase_ ) , 1_0_0_8 )
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8 )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
_A: Optional[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_A: str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_A: Dict = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_A: Optional[int] = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
    def big_tokenizer( self : Any ):
"""simple docstring"""
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def __magic_name__ ( self : int ):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_A: int = self.get_tokenizer()
_A: str = self.get_rust_tokenizer()
_A: Union[str, Any] = '''I was born in 92000, and this is falsé.'''
_A: List[str] = tokenizer.tokenize(lowerCAmelCase_ )
_A: str = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_A: str = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_A: List[Any] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_A: Any = self.get_rust_tokenizer()
_A: List[Any] = tokenizer.encode(lowerCAmelCase_ )
_A: List[str] = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A: List[str] = '''Hello World!'''
_A: Optional[Any] = [2, 3_1_2_2_7, 4_4_4_7, 3_5]
self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) )
@slow
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Dict = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
_A: List[str] = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) )
@slow
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: str = {
'''input_ids''': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name='''facebook/xglm-564M''' , padding=lowerCAmelCase_ , )
| 354
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : List[str]=3_2 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : int=1_0 , lowerCAmelCase_ : Tuple=[1_0, 2_0, 3_0, 4_0] , lowerCAmelCase_ : Optional[Any]=[1, 1, 2, 1] , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : int=3 , lowerCAmelCase_ : List[Any]=None , ):
"""simple docstring"""
_A: str = parent
_A: List[Any] = batch_size
_A: Optional[int] = image_size
_A: Dict = num_channels
_A: str = embeddings_size
_A: Any = hidden_sizes
_A: Dict = depths
_A: Any = is_training
_A: int = use_labels
_A: Tuple = hidden_act
_A: int = num_labels
_A: int = scope
_A: str = len(lowerCAmelCase_ )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A: Union[str, Any] = self.get_config()
return config, pixel_values
def __magic_name__ ( self : str ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __magic_name__ ( self : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str ):
"""simple docstring"""
_A: str = FlaxRegNetModel(config=lowerCAmelCase_ )
_A: Optional[int] = model(lowerCAmelCase_ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __magic_name__ ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
_A: Union[str, Any] = self.num_labels
_A: Union[str, Any] = FlaxRegNetForImageClassification(config=lowerCAmelCase_ )
_A: str = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A: str = self.prepare_config_and_inputs()
_A , _A: Optional[int] = config_and_inputs
_A: Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class UpperCAmelCase ( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
def __magic_name__ ( self : int ):
"""simple docstring"""
        self.model_tester = FlaxRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
def __magic_name__ ( self : str ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self : int ):
"""simple docstring"""
return
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def __magic_name__ ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
pass
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A , _A: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A: Union[str, Any] = model_class(lowerCAmelCase_ )
_A: Any = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A: Any = [*signature.parameters.keys()]
_A: Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def __magic_name__ ( self : str ):
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple ):
_A: int = model_class(lowerCAmelCase_ )
_A: List[str] = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_A: str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A: Tuple = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 )
_A , _A: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A: Optional[Any] = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A: int = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A , _A: str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A: int = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
_A: Union[str, Any] = model_class(lowerCAmelCase_ )
@jax.jit
def model_jitted(lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : Optional[Any] ):
return model(pixel_values=lowerCAmelCase_ , **lowerCAmelCase_ )
with self.subTest('''JIT Enabled''' ):
_A: str = model_jitted(**lowerCAmelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_A: List[Any] = model_jitted(**lowerCAmelCase_ ).to_tuple()
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( ) -> Tuple:
_A: List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
_A: List[str] = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
_A: str = self.default_image_processor
_A: int = prepare_img()
_A: List[Any] = image_processor(images=lowerCAmelCase_ , return_tensors='''np''' )
_A: str = model(**lowerCAmelCase_ )
# verify the logits
_A: str = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_A: Tuple = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
| 301
| 0
|
import math
def sieve( n: int ) -> list[int]:
    prime = []
    start = 2
    end = int(math.sqrt(n ) )  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start , end + 1 , start ):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end , n )
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(t , high + 1 , each ):
                temp[j - low] = False
        for j in range(len(temp ) ):
            if temp[j] is True:
                prime.append(j + low )
        low = high + 1
        high = min(high + end , n )
    return prime
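# Editor's worked example (not part of the original file): for n = 30 the base
# segment sieves up to int(sqrt(30)) = 5, giving in_prime = [2, 3, 5]; the rest
# of the range is then sieved block by block, so
# sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].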
print(sieve(10**6))
| 355
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack ( list ):
'''simple docstring'''
    def __lt__( self : Dict , other : Optional[int] ):
        """simple docstring"""
        return self[-1] < other[-1]
    def __eq__( self : int , other : Optional[Any] ):
        """simple docstring"""
        return self[-1] == other[-1]
def patience_sort( collection: list ) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element] )
        i = bisect_left(stacks , new_stack )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stack )
    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
    return collection
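# Editor's worked example (not part of the original file): patience_sort([5, 1, 4, 2, 3])
# deals the decreasing piles [5, 1], [4, 2] and [3]; the heap-based merge over
# the reversed piles [1, 5], [2, 4], [3] then yields the sorted [1, 2, 3, 4, 5].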
if __name__ == "__main__":
UpperCAmelCase__ : Tuple = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase__ : Optional[Any] = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
| 301
| 0
|
from manim import *
class UpperCAmelCase ( Scene ):
'''simple docstring'''
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
_A: Optional[Any] = Rectangle(height=0.5 , width=0.5 )
_A: int = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_A: List[str] = [mem.copy() for i in range(6 )]
_A: Optional[Any] = [mem.copy() for i in range(6 )]
_A: Any = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
_A: Union[str, Any] = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
_A: Union[str, Any] = VGroup(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
_A: Union[str, Any] = Text('''CPU''' , font_size=2_4 )
_A: Union[str, Any] = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0.5 , aligned_edge=lowerCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase_ )
_A: str = [mem.copy() for i in range(1 )]
_A: List[str] = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
_A: Dict = Text('''GPU''' , font_size=2_4 )
_A: str = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0.5 , aligned_edge=lowerCAmelCase_ )
gpu.align_to(lowerCAmelCase_ , lowerCAmelCase_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCAmelCase_ )
_A: Optional[Any] = [mem.copy() for i in range(6 )]
_A: str = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
_A: Union[str, Any] = Text('''Model''' , font_size=2_4 )
_A: Optional[Any] = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0.5 , aligned_edge=lowerCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCAmelCase_ , run_time=1 ) , Create(lowerCAmelCase_ , run_time=1 ) , Create(lowerCAmelCase_ , run_time=1 ) , )
_A: Optional[Any] = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=2_4 , )
_A: int = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_A: Dict = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase_ , run_time=2.5 ) , Write(lowerCAmelCase_ ) , Write(lowerCAmelCase_ ) )
self.add(lowerCAmelCase_ )
_A: List[Any] = []
_A: Union[str, Any] = []
_A: List[str] = []
for i, rect in enumerate(lowerCAmelCase_ ):
_A: Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase_ , opacity=0.7 )
cpu_target.move_to(lowerCAmelCase_ )
cpu_target.generate_target()
_A: Optional[int] = 0.46 / 4
_A: Any = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCAmelCase_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCAmelCase_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCAmelCase_ , buff=0.0 )
cpu_targs.append(lowerCAmelCase_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCAmelCase_ ) )
second_animations.append(MoveToTarget(lowerCAmelCase_ , run_time=1.5 ) )
self.play(*lowerCAmelCase_ )
self.play(*lowerCAmelCase_ )
self.wait()
| 356
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def generate_summaries_or_translations( examples , out_file , model_name , batch_size = 8 , device = DEFAULT_DEVICE , fp16=False , task="summarization" , prefix=None , **generate_kwargs , ) -> Dict:
    fout = Path(out_file ).open('''w''' , encoding='''utf-8''' )
    model_name = str(model_name )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name ).to(device )
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model , task )
    if prefix is None:
        prefix = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
    for examples_chunk in tqdm(list(chunks(examples , batch_size ) ) ):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk , return_tensors='''pt''' , truncation=True , padding='''longest''' ).to(device )
        summaries = model.generate(
            input_ids=batch.input_ids , attention_mask=batch.attention_mask , **generate_kwargs , )
        dec = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
    runtime = int(time.time() - start_time )  # seconds
    n_obs = len(examples )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def datetime_now():
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def run_generate( verbose=True ):
    parser = argparse.ArgumentParser()
    parser.add_argument('''model_name''' , type=str , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
    parser.add_argument('''input_path''' , type=str , help='''like cnn_dm/test.source''' )
    parser.add_argument('''save_path''' , type=str , help='''where to save summaries''' )
    parser.add_argument('''--reference_path''' , type=str , required=False , help='''like cnn_dm/test.target''' )
    parser.add_argument('''--score_path''' , type=str , required=False , default='''metrics.json''' , help='''where to save metrics''' )
    parser.add_argument('''--device''' , type=str , required=False , default=DEFAULT_DEVICE , help='''cuda, cuda:1, cpu etc.''' )
    parser.add_argument(
        '''--prefix''' , type=str , required=False , default=None , help='''will be added to the beginning of src examples''' )
    parser.add_argument('''--task''' , type=str , default='''summarization''' , help='''used for task_specific_params + metrics''' )
    parser.add_argument('''--bs''' , type=int , default=8 , required=False , help='''batch size''' )
    parser.add_argument(
        '''--n_obs''' , type=int , default=-1 , required=False , help='''How many observations. Defaults to all.''' )
    parser.add_argument('''--fp16''' , action='''store_true''' )
    parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
    parser.add_argument(
        '''--info''' , nargs='''?''' , type=str , const=datetime_now() , help=(
            '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
            ''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
        ) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
_A , _A: Tuple = parser.parse_known_args()
_A: List[str] = parse_numeric_n_bool_cl_kwargs(a )
if parsed_args and verbose:
print(f"""parsed the following generate kwargs: {parsed_args}""" )
_A: int = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
_A: List[str] = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=a )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
_A: Dict = generate_summaries_or_translations(
a , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **a , )
if args.reference_path is None:
return {}
# Compute scores
_A: Dict = calculate_bleu if '''translation''' in args.task else calculate_rouge
_A: List[Any] = [x.rstrip() for x in open(args.save_path ).readlines()]
_A: Any = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(a )]
_A: dict = score_fn(a , a )
scores.update(a )
if args.dump_args:
scores.update(a )
if args.info:
_A: Optional[Any] = args.info
if verbose:
print(a )
if args.score_path is not None:
json.dump(a , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
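    # Example usage for summarization (an illustrative invocation; the dataset paths
    # below are placeholders, not files shipped with this script):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $SAVE_DIR/test_summaries.txt \
    #     --reference_path $DATA_DIR/test.target --score_path $SAVE_DIR/test_rouge.json --task summarization --bs 16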
| 301
| 0
|
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Solve a rat-in-a-maze problem: print the 0/1 path matrix and return whether a path exists."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursive depth-first helper: from cell (i, j), try down, right, up, left."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
    import doctest

    doctest.testmod()
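    # A small illustrative maze (a made-up example for demonstration, not from the
    # original module): 0 marks an open cell, 1 a wall.
    demo_maze = [
        [0, 1, 0, 0],
        [0, 0, 0, 1],
        [1, 0, 1, 0],
        [0, 0, 0, 0],
    ]
    solve_maze(demo_maze)  # prints the 0/1 path matrix if a route to the bottom-right exists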
| 357
|
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Hill climbing that sometimes accepts worse neighbors, with probability e ** (change / temperature)."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
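    # A quick numeric sanity check of the acceptance rule used above (the two values
    # are hand-computed, shown purely for illustration): with change = -2 at
    # current_temp = 100, e ** (change / current_temp) is roughly 0.980, so a slightly
    # worse neighbor is almost always accepted early on, while at current_temp = 1 the
    # same move survives only about 0.135 of the time.
    assert abs(math.e ** (-2 / 100) - 0.980) < 1e-3
    assert abs(math.e ** (-2 / 1) - 0.135) < 1e-3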
| 301
| 0
|
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process, always running the shortest ready job."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time from burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # a process whose arrival time has passed
    # and that has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is the sum of a process's burst time and waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
| 358
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return set of symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
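# For example (illustrative): get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, i.e. the candidate merges
# that the BPE loop below ranks against self.bpe_ranks.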
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair Encoding)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1

                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into tokens using BPE."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens in a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
| 301
| 0
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 359
|
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
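# A minimal sketch of how a loader like this is typically used (the guarded call is
# an assumption for illustration; the real call sites live in the DeformableDetr
# model code, which falls back to a pure-PyTorch path when compilation fails):
# try:
#     MSDA = load_cuda_kernels()
# except Exception:
#     MSDA = None  # fall back to the slower, pure-PyTorch attention implementation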
| 301
| 0
|
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def test_custom_files_are_present(transformers_path):
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| 360
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    """Constructs a BLIP processor which wraps a BLIP image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 301
| 0
|
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ("image-to-text", "MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier):
    """Split a camel-cased name into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
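# For example (illustrative): camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"],
# which is what lets the backend lookup below strip one trailing word at a time.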
def get_frameworks_table() -> pd.DataFrame:
    """Generates a dataframe flagging, for each model type, which backends support it."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token, commit_sha):
    """Update the metadata for the Transformers repo on the Hub."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
| 361
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    """Configuration class to store the configuration of a MobileNetV1 model."""

    model_type = "mobilenet_v1"
    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001, **kwargs):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
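# A minimal sketch of how an OnnxConfig like this is consumed (the export helper
# below comes from the transformers.onnx package; treat the exact call as an
# assumption for illustration rather than a verified recipe):
# from pathlib import Path
# from transformers.onnx import export
# onnx_config = MobileNetV1OnnxConfig(config)
# export(preprocessor, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))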
| 301
| 0
|
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """
    Implementation of pigeonhole sort.
    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> pigeon_sort([])
    []
    >>> pigeon_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
| 362
|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 301
| 0
|
"""simple docstring"""
def lowerCamelCase__ ( a = 1_00_00_00 ) -> int:
_A: Any = 1
_A: Dict = 1
_A: List[str] = {1: 1}
for inputa in range(2 , a ):
_A: Any = 0
_A: Any = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_A: int = (3 * number) + 1
counter += 1
if inputa not in counters:
_A: Union[str, Any] = counter
if counter > pre_counter:
_A: Dict = inputa
_A: Optional[Any] = counter
return largest_number
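# For instance, the chain starting at 13 is 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1
# (10 terms), so once 13 has been processed its chain length is cached in `counters`
# and reused by every later starting number whose chain passes through 13.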
if __name__ == "__main__":
    print(solution(int(input().strip())))
| 363
|
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'ctrl': 256,
}
CONTROL_CODES = {
'Pregnancy': 168629,
'Christianity': 7675,
'Explain': 106423,
'Fitness': 63440,
'Saving': 63163,
'Ask': 27171,
'Ass': 95985,
'Joke': 163509,
'Questions': 45622,
'Thoughts': 49605,
'Retail': 52342,
'Feminism': 164338,
'Writing': 11992,
'Atheism': 192263,
'Netflix': 48616,
'Computing': 39639,
'Opinion': 43213,
'Alone': 44967,
'Funny': 58917,
'Gaming': 40358,
'Human': 4088,
'India': 1331,
'Joker': 77138,
'Diet': 36206,
'Legal': 11859,
'Norman': 4939,
'Tip': 72689,
'Weight': 52343,
'Movies': 46273,
'Running': 23425,
'Science': 2090,
'Horror': 37793,
'Confession': 60572,
'Finance': 12250,
'Politics': 16360,
'Scary': 191985,
'Support': 12654,
'Technologies': 32516,
'Teenage': 66160,
'Event': 32769,
'Learned': 67460,
'Notion': 182770,
'Wikipedia': 37583,
'Books': 6665,
'Extract': 76050,
'Confessions': 102701,
'Conspiracy': 75932,
'Links': 63674,
'Narcissus': 150425,
'Relationship': 54766,
'Relationships': 134796,
'Reviews': 41671,
'News': 4256,
'Translation': 26820,
'multilingual': 128406,
}
def get_pairs(word):
    """Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    """Constructs a CTRL tokenizer based on BPE (Byte-Pair Encoding)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string using BPE."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 301
| 0
|
def gnome_sort(lst: list) -> list:
    """Pure implementation of the gnome sort algorithm: walk forward while ordered, swap and step back otherwise."""
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 364
|
def solution(n: int = 10) -> str:
    """Return the last `n` digits of 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28_433 * (pow(2, 7_830_457, modulus)) + 1
    return str(number % modulus)
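# The three-argument pow(2, 7_830_457, modulus) performs modular exponentiation, so
# only the last n digits are ever kept; the full 2**7830457 (about 2.36 million
# decimal digits) is never materialized.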
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
| 301
| 0
|
| 365
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 301
| 0
|