code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__UpperCAmelCase = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Optional[Any] = None ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = None
__lowerCAmelCase : int = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
__lowerCAmelCase : str = os.path.abspath("""examples""" )
for item in os.listdir(_snake_case ):
if item not in EXCLUDE_EXAMPLES:
__lowerCAmelCase : Dict = os.path.join(_snake_case , _snake_case )
if os.path.isfile(_snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=_snake_case , feature_script=_snake_case , tested_section="""main()""" if parser_only else """training_function()""" , ):
__lowerCAmelCase : List[Any] = compare_against_test(
os.path.join(_snake_case , _snake_case ) , _snake_case , _snake_case , _snake_case )
__lowerCAmelCase : List[str] = """\n""".join(_snake_case )
if special_strings is not None:
for string in special_strings:
__lowerCAmelCase : List[Any] = diff.replace(_snake_case , """""" )
self.assertEqual(_snake_case , """""" )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
"""simple docstring"""
self.one_complete_example("""complete_nlp_example.py""" , _snake_case )
self.one_complete_example("""complete_nlp_example.py""" , _snake_case )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Tuple = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
__lowerCAmelCase : Union[str, Any] = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" , _snake_case , _snake_case , _snake_case )
self.one_complete_example("""complete_cv_example.py""" , _snake_case , _snake_case , _snake_case )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class SCREAMING_SNAKE_CASE ( a__ ):
"""simple docstring"""
lowerCamelCase : Optional[int] =False
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().setUpClass()
__lowerCAmelCase : int = tempfile.mkdtemp()
__lowerCAmelCase : str = os.path.join(cls._tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
__lowerCAmelCase : str = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Dict ) -> Union[str, Any]:
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[Any] = f'''\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = f'''\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '''.split()
__lowerCAmelCase : Any = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = f'''\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n '''.split()
__lowerCAmelCase : Optional[int] = run_command(self._launch_args + testargs , return_stdout=_snake_case )
self.assertNotIn("""epoch 0:""" , _snake_case )
self.assertIn("""epoch 1:""" , _snake_case )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = f'''\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n '''.split()
__lowerCAmelCase : int = run_command(self._launch_args + testargs , return_stdout=_snake_case )
if torch.cuda.is_available():
__lowerCAmelCase : Union[str, Any] = torch.cuda.device_count()
else:
__lowerCAmelCase : int = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , _snake_case )
self.assertIn("""epoch 1:""" , _snake_case )
else:
self.assertIn("""epoch 0:""" , _snake_case )
self.assertIn("""epoch 1:""" , _snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = """\n examples/by_feature/cross_validation.py\n --num_folds 2\n """.split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
__lowerCAmelCase : int = run_command(self._launch_args + testargs , return_stdout=_snake_case )
__lowerCAmelCase : Tuple = re.findall("""({.+})""" , _snake_case )
__lowerCAmelCase : Optional[int] = [r for r in results if """accuracy""" in r][-1]
__lowerCAmelCase : Optional[Any] = ast.literal_eval(_snake_case )
self.assertGreaterEqual(results["""accuracy"""] , 0.75 )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
"""simple docstring"""
__lowerCAmelCase : List[Any] = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
__lowerCAmelCase : Dict = f'''\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(_snake_case , """tracking""" ) ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowerCAmelCase : List[str] = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs )
| 651 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class a :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
lowerCAmelCase = self.vocab_size - 1
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , head_mask=_snake_case )
lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTLMHeadModel(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTDoubleHeadsModel(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = OpenAIGPTForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,
) = config_and_inputs
lowerCAmelCase = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class a ( a__ , a__ , a__ , unittest.TestCase ):
snake_case__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
snake_case__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
snake_case__ = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case=False ):
"""simple docstring"""
lowerCAmelCase = super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_snake_case , )
lowerCAmelCase = inputs_dict['labels']
lowerCAmelCase = inputs_dict['labels']
lowerCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_snake_case , )
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_snake_case )
return inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , n_embd=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = OpenAIGPTModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@require_torch
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(_snake_case )
lowerCAmelCase = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=_snake_case ) # the president is
lowerCAmelCase = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCAmelCase = model.generate(_snake_case , do_sample=_snake_case )
self.assertListEqual(output_ids[0].tolist() , _snake_case )
| 4 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5_12 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=4 , ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : str =parent
UpperCamelCase__ : List[str] =batch_size
UpperCamelCase__ : List[Any] =seq_length
UpperCamelCase__ : List[str] =is_training
UpperCamelCase__ : Optional[Any] =use_attention_mask
UpperCamelCase__ : Tuple =use_token_type_ids
UpperCamelCase__ : Optional[Any] =use_labels
UpperCamelCase__ : Union[str, Any] =vocab_size
UpperCamelCase__ : int =hidden_size
UpperCamelCase__ : Dict =num_hidden_layers
UpperCamelCase__ : Any =num_attention_heads
UpperCamelCase__ : List[str] =intermediate_size
UpperCamelCase__ : Any =hidden_act
UpperCamelCase__ : str =hidden_dropout_prob
UpperCamelCase__ : Union[str, Any] =attention_probs_dropout_prob
UpperCamelCase__ : List[Any] =max_position_embeddings
UpperCamelCase__ : int =type_vocab_size
UpperCamelCase__ : Any =type_sequence_label_size
UpperCamelCase__ : Optional[Any] =initializer_range
UpperCamelCase__ : str =num_choices
def UpperCAmelCase ( self) -> Any:
"""simple docstring"""
UpperCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase__ : Optional[int] =None
if self.use_attention_mask:
UpperCamelCase__ : List[str] =random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase__ : List[Any] =None
if self.use_token_type_ids:
UpperCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCamelCase__ : Optional[int] =AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : int =self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] =config_and_inputs
UpperCamelCase__ : Optional[Any] ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class lowercase__( snake_case__ , unittest.TestCase ):
'''simple docstring'''
snake_case__ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase ( self) -> Any:
"""simple docstring"""
UpperCamelCase__ : Any =FlaxAlbertModelTester(self)
@slow
def UpperCAmelCase ( self) -> Union[str, Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCamelCase__ : Dict =model_class_name.from_pretrained("albert-base-v2")
UpperCamelCase__ : Any =model(np.ones((1, 1)))
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
@require_flax
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : List[Any] =FlaxAlbertModel.from_pretrained("albert-base-v2")
UpperCamelCase__ : Dict =np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
UpperCamelCase__ : Optional[Any] =np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
UpperCamelCase__ : List[Any] =model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE)[0]
UpperCamelCase__ : List[str] =(1, 11, 7_68)
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE)
UpperCamelCase__ : str =np.array(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1E-4))
| 582 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 582 | 1 |
'''simple docstring'''
def __UpperCamelCase ( lowercase_ : list ):
"""simple docstring"""
a_ = 0
while len(__snake_case ) > 1:
a_ = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
a_ = files.index(min(__snake_case ) )
temp += files[min_index]
files.pop(__snake_case )
files.append(__snake_case )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 536 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = BlenderbotSmallTokenizer
__snake_case = False
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
A__ : Union[str, Any] =["""__start__""", """adapt""", """act""", """ap@@""", """te""", """__end__""", """__unk__"""]
A__ : Dict =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
A__ : List[str] =["""#version: 0.2""", """a p""", """t e</w>""", """ap t</w>""", """a d""", """ad apt</w>""", """a c""", """ac t</w>""", """"""]
A__ : Optional[Any] ={"""unk_token""": """__unk__""", """bos_token""": """__start__""", """eos_token""": """__end__"""}
A__ : int =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A__ : Tuple =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase_ ) )
def lowercase__ ( self : List[Any] , **lowerCAmelCase_ : int ) -> int:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
A__ : List[Any] ="""adapt act apte"""
A__ : Any ="""adapt act apte"""
return input_text, output_text
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
A__ : Optional[Any] =BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
A__ : List[str] ="""adapt act apte"""
A__ : Union[str, Any] =["""adapt""", """act""", """ap@@""", """te"""]
A__ : List[str] =tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Tuple =[tokenizer.bos_token] + tokens + [tokenizer.eos_token]
A__ : str =[0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
A__ : Union[str, Any] =BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
assert tok("""sam""" ).input_ids == [13_84]
A__ : str ="""I am a small frog."""
A__ : Dict =tok([src_text] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ )["""input_ids"""]
A__ : int =tok.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ : str =BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
A__ : Dict ="""I am a small frog ."""
A__ : Union[str, Any] ="""."""
A__ : Optional[int] =tok(lowerCAmelCase_ )["""input_ids"""]
A__ : List[str] =tok(lowerCAmelCase_ )["""input_ids"""]
assert encoded[-1] == encoded_dot[0]
| 215 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A : Tuple = {
'''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig''']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[Any] = ['''RemBertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : List[Any] = ['''RemBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[int] = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Tuple = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
_A : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 189 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for CycleDiffusionPipeline built from tiny randomly-initialized components.

    NOTE(review): class/method/variable names reconstructed from in-body uses
    (`self.get_dummy_components()`, `super().test_save_load_local()`, ...); the
    obfuscated original had every method named identically, so only the last
    definition survived and the class was unusable.
    """

    pipeline_class = CycleDiffusionPipeline
    # Cycle diffusion is image-to-image: height/width/negative prompts do not apply.
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a dict of tiny pipeline components so tests run in seconds on CPU."""
        torch.manual_seed(0)
        # NOTE(review): name kept to match this file's import; presumably UNet2DConditionModel.
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline on `device`."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5  # map noise from [-0.5, 0.5] into [0, 1]
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests for CycleDiffusionPipeline against reference images.

    NOTE(review): identifiers reconstructed — the obfuscated original assigned every
    value to the same throwaway name and then read undefined names (`init_image`,
    `expected_image`, `pipe`, ...), and both test classes in this file shared the
    name `a`, so the second shadowed the first.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
| 189 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the MaskFormer Swin backbone (model_type ``"maskformer-swin"``).

    NOTE(review): parameter/attribute names reconstructed — the obfuscated original
    had every ``__init__`` parameter named identically (a SyntaxError) and discarded
    each value instead of assigning ``self.*``, while later lines read
    ``self.conv``/``self.stage_names`` style attributes.
    """

    model_type = "maskformer-swin"

    # Map the standard config attribute names onto this config's field names.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 18 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
SCREAMING_SNAKE_CASE_ = list[list[float | int]]
def solve(matrix, vector):
    """Solve the linear system ``matrix @ x = vector`` by Gaussian elimination.

    Uses partial pivoting, then back substitution. Returns the solution as a
    column vector (list of 1-element rows), each entry rounded to 10 decimal
    places to suppress float noise.

    Renamed from the obfuscated ``lowercase__``: this file calls it as
    ``solve(...)`` inside ``interpolate``. Dead ``= 42`` placeholder
    assignments (garbled type declarations) removed.
    """
    size = len(matrix)
    # Build the augmented matrix [matrix | vector].
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # Partial pivoting: pick the remaining row with the largest |entry|
        # in this column for numerical stability.
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            # Whole column is zero below the pivot: move on.
            col += 1
            continue
        augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        # Eliminate this column from every row below the pivot.
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1

    # Back substitution: clear the entries above each pivot.
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_points):
    """Return the minimal-degree polynomial through ``(1, y_1) ... (k, y_k)`` as a callable.

    Builds the Vandermonde system for the k given values and solves it with
    ``solve``. Renamed from the obfuscated ``lowercase__``: ``solution`` calls
    it as ``interpolate(...)``. Dead ``= 42`` placeholder assignments removed.
    """
    size = len(y_points)
    matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            # Row for x = x_val + 1: coefficients of x^(size-1) ... x^0.
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var):
        """Evaluate the fitted polynomial at ``var`` (coefficients rounded to ints)."""
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable):
    """The Project Euler 101 generating polynomial u(n) = 1 - n + n^2 - ... + n^10.

    Renamed from the obfuscated ``lowercase__``: ``solution`` uses it as its
    default ``func`` under the name ``question_function``.
    """
    # Alternating-sign power series: sum_{k=0}^{10} (-n)^k.
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func=question_function, order=10):
    """Project Euler 101: sum of the first incorrect terms (FITs) of the
    optimum fitting polynomials OP(k, n) for k = 1..order.

    Renamed from the obfuscated ``lowercase__``: the ``__main__`` guard calls
    ``solution()``. Dead ``= 42`` placeholder assignments removed.
    """
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    # OP(k, n): polynomial fitted to the first k data points, for each k.
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        x_val = 1
        # Advance until the fit first disagrees with the true function;
        # that first incorrect value is this polynomial's FIT.
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F'{solution() = }')
| 373 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    """Builds tiny RegNet configs/inputs and runs shape checks for the model tests.

    NOTE(review): names reconstructed — the obfuscated original had duplicate
    ``__init__`` parameters (SyntaxError) and discarded values instead of
    setting ``self.*``; the test class below instantiates ``RegNetModelTester(self)``
    and reads ``self.model_tester.num_stages`` etc., which grounds these names.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as RegNet does not use
    input_ids, inputs_embeds, attention_mask and seq_length.

    NOTE(review): method/attribute names reconstructed from in-body uses
    (``self.model_tester``, ``self.all_model_classes``, ...); the obfuscated
    original gave every method the same name so only the last survived.
    """

    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    # RegNet is a pure conv net: the usual attention/embedding machinery does not apply.
    # NOTE(review): the original flag names were obfuscated to a shared placeholder;
    # these four are the conventional ones — confirm against upstream.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                # Norm layers must start as the identity transform.
                # (Fixed from the nonexistent ``nn.BatchNormad`` in the garbled source.)
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO test image used by the integration test below.

    Renamed from the obfuscated ``__magic_name__``: the integration test calls
    ``prepare_img()``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test: pretrained RegNet logits on a reference image.

    NOTE(review): names reconstructed — the obfuscated original read
    ``self.default_image_processor`` and ``prepare_img()``, grounding them.
    """

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 709 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    """Configuration class for UniSpeech models (model_type ``"unispeech"``).

    NOTE(review): parameter/attribute names reconstructed — the obfuscated
    original gave all ``__init__`` parameters the same name (a SyntaxError)
    and discarded values instead of assigning ``self.*`` attributes that the
    validation block below reads (``self.conv_dim`` etc.).
    """

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        # Total downsampling factor of the convolutional feature encoder.
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 63 | 0 |
from math import isqrt, loga
def calculate_prime_numbers(max_number):
    """Sieve of Eratosthenes: return all primes in ``[2, max_number)``.

    Renamed from the obfuscated ``_lowerCamelCase``: ``solution`` below calls
    ``calculate_prime_numbers(...)``. The sieve list is named ``is_prime``
    because the original body read that (previously unassigned) name.
    """
    # Local import: the module-level `from math import isqrt, loga` line is
    # garbled (math has no `loga`) and raises ImportError, taking isqrt with it.
    from math import isqrt

    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Multiples below i*i were already crossed off by smaller primes.
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(base=80_0800, degree=80_0800):
    """Project Euler 800: count hybrid-integers p^q * q^p (p < q prime) that
    do not exceed base^degree.

    Works in log2 space: p^q * q^p <= base^degree iff
    q*log2(p) + p*log2(q) <= degree*log2(base). Renamed from the obfuscated
    ``_lowerCamelCase``; the obfuscation had also collapsed the two distinct
    parameters into one duplicated name (a SyntaxError).
    """
    # Local import: the module-level math import line is garbled (`loga`).
    from math import log2

    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    # Two-pointer sweep: for each smallest prime p = primes[left], shrink
    # `right` until the pair (p, primes[right]) fits under the bound; every
    # prime between them then also forms a valid pair with p.
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(f'''{solution() = }''')
| 79 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a, b, c):
    """Return the two roots of ``a*x^2 + b*x + c = 0``.

    Real roots come back as plain floats; complex roots as complex numbers.
    Renamed from the obfuscated ``_lowerCAmelCase``: ``main`` below calls
    ``quadratic_roots(a=5, b=6, c=1)``. The obfuscation had also collapsed
    all three parameters into one duplicated name (a SyntaxError).

    Raises:
        ValueError: if ``a`` is zero (the equation is not quadratic).
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    # cmath.sqrt handles a negative discriminant without raising.
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main():
    """Demo: print the roots of 5x^2 + 6x + 1 = 0.

    Renamed from the obfuscated ``_lowerCAmelCase``: the ``__main__`` guard
    calls ``main()``. The f-string previously referenced the undefined name
    ``solutiona`` twice; restored to the two unpacked roots.
    """
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")
if __name__ == "__main__":
main()
| 572 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Wraps a SpeechT5 feature extractor (audio) and a SpeechT5 tokenizer (text)
    into a single processor.

    NOTE(review): names reconstructed — the obfuscated original used the
    undefined base class ``a``, duplicated the ``__init__`` parameter name
    (a SyntaxError), and discarded every popped kwarg instead of binding the
    names (``audio``, ``text``, ...) its own branches read.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Process audio/text inputs and (optionally) audio/text targets.

        Exactly one of `audio`/`text` may be given as the model input, and
        exactly one of `audio_target`/`text_target` as the label source.
        Targets are attached to the inputs as `labels` (plus
        `decoder_attention_mask` when available).
        """
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """Collate/pad pre-processed `input_values`/`input_ids` and `labels`
        into a batch, mirroring the routing logic of `__call__`."""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                # Text labels: pad token ids with the tokenizer.
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Spectrogram labels: the feature extractor normally pads 1-D
                # waveforms, so temporarily switch its feature size to the
                # mel-bin count to pad 2-D targets, then restore it.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
| 560 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE = 200_0000 ) ->int:
    """
    Project Euler 10: return the sum of all primes strictly below the limit.

    Uses a sieve of Eratosthenes where ``primality_list[i] == 0`` means
    "i is prime".  Fixes two defects: the body read an undefined name ``n``
    instead of the parameter, and the inner sieve loop stepped by the limit
    instead of by ``i``, so composites were never marked.
    """
    n = _SCREAMING_SNAKE_CASE
    if n < 2:
        # no primes below 2; also keeps the index assignments below in range
        return 0
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            # mark every multiple of i as composite, starting at i*i
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
    # The sieve in this module is named `__A`; the previous `solution` name
    # does not exist here and raised NameError.
    print(F'''{__A() = }''')
| 560 | 1 |
import requests
_a : Optional[int] = """""" # <-- Put your OpenWeatherMap appid here!
_a : Any = """https://api.openweathermap.org/data/2.5/"""
def snake_case__ ( q: str = "Chicago" , appid: str = APPID ):
    """Return current-weather JSON for location `q` from OpenWeatherMap.

    The parameter names double as the query-string keys via ``locals()``
    (``q`` and ``appid`` are what the /weather endpoint expects), which is why
    the original duplicate ``UpperCAmelCase`` names (a SyntaxError) are
    restored to meaningful ones.
    """
    return requests.get(URL_BASE + "weather" , params=locals() ).json()
def snake_case__ ( q: str = "Kolkata, India" , appid: str = APPID ):
    """Return 5-day forecast JSON for location `q` from OpenWeatherMap.

    Parameter names double as query-string keys via ``locals()`` (``q`` and
    ``appid``); the original duplicate parameter names were a SyntaxError.
    """
    return requests.get(URL_BASE + "forecast" , params=locals() ).json()
def snake_case__ ( lat: float = 55.68 , lon: float = 12.57 , appid: str = APPID ):
    """Return One Call API JSON for the given coordinates (Copenhagen default).

    Parameter names double as query-string keys via ``locals()`` (``lat``,
    ``lon``, ``appid``); the original duplicate parameter names were a
    SyntaxError.
    """
    return requests.get(URL_BASE + "onecall" , params=locals() ).json()
if __name__ == "__main__":
    from pprint import pprint

    # Interactive loop: keep prompting for a location until an empty line is
    # entered, printing the current-weather response for each input.
    while True:
        # NOTE(review): the input is bound to `_a`, but the lines below read
        # `location` and call `current_weather`, neither of which is defined
        # in this module — presumably `_a` was originally named `location`
        # and the first helper was named `current_weather`; confirm against
        # the upstream source before running.
        _a : Union[str, Any] = input("""Enter a location:""").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
| 145 |
from collections import Counter
from timeit import timeit
def snake_case__ ( UpperCAmelCase : str = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def snake_case__ ( UpperCAmelCase : str = "" ):
if len(UpperCAmelCase ) == 0:
return True
lowerCAmelCase__ :List[str] = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
lowerCAmelCase__ :dict[str, int] = {}
for character in lower_case_input_str:
lowerCAmelCase__ :Tuple = character_freq_dict.get(UpperCAmelCase , 0 ) + 1
lowerCAmelCase__ :Dict = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def snake_case__ ( UpperCAmelCase : str = "" ):
    """Benchmark both palindrome-rearrangement checks on the given string."""
    # NOTE(review): this function calls can_string_be_rearranged_as_palindrome_counter /
    # can_string_be_rearranged_as_palindrome, and the timeit snippets read
    # z.check_str from __main__, but in this module both checkers are named
    # `snake_case__` and `check_str` only exists under the __main__ guard —
    # presumably the original function names were lost; confirm against the
    # upstream source before running.
    print("\nFor string = " , UpperCAmelCase , ":" )
    print(
        "> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(UpperCAmelCase ) , "\ttime =" , timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
    print(
        "> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(UpperCAmelCase ) , "\ttime =" , timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
    # NOTE(review): `benchmark`, `check_str`, `status` and
    # `can_string_be_rearranged_as_palindrome_counter` are not defined under
    # those names in this module (the input is bound to `_a`, the helpers are
    # all named `snake_case__`); the original variable/function names must be
    # restored before this entry point can run.
    _a : Any = input(
        """Enter string to determine if it can be rearranged as a palindrome or not: """
    ).strip()
    benchmark(check_str)
    _a : Tuple = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
| 145 | 1 |
__lowerCamelCase = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 455 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class snake_case_ (lowercase__ ):
    """Base dataset reader: stores the path(s), split and load options.

    Fixes the obfuscated ``__init__`` whose parameters all shared one name (a
    SyntaxError) and whose assignments went to throwaway locals instead of
    instance attributes.
    """

    def __init__(
        self,
        path_or_paths=None,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        num_proc=None,
        **kwargs,
    ):
        """
        Args:
            path_or_paths: file path, list of paths, or mapping of split -> paths.
            split: split to read; defaults to "train" unless paths are keyed by split.
            features: optional schema to cast the dataset to.
            cache_dir: directory for the download/processing cache.
            keep_in_memory: whether to copy the dataset into memory.
            streaming: whether to return a streamed (iterable) dataset.
            num_proc: number of processes used for preparation.
            **kwargs: extra builder keyword arguments.
        """
        self.path_or_paths = path_or_paths
        # default to "train" unless a split is given or paths are keyed by split
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def A_(self):
        """Read and return the dataset; implemented by format-specific subclasses."""
        pass
class snake_case_ (lowercase__ ):
    """Base dataset input stream: stores load options for stream-like sources.

    Fixes the obfuscated ``__init__`` whose parameters all shared one name (a
    SyntaxError) and whose assignments went to throwaway locals instead of
    instance attributes.
    """

    def __init__(
        self,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        num_proc=None,
        **kwargs,
    ):
        """
        Args:
            features: optional schema to cast the dataset to.
            cache_dir: directory for the download/processing cache.
            keep_in_memory: whether to copy the dataset into memory.
            streaming: whether to return a streamed (iterable) dataset.
            num_proc: number of processes used for preparation.
            **kwargs: extra builder keyword arguments.
        """
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def A_(self):
        """Read and return the dataset; implemented by subclasses."""
        pass
| 455 | 1 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class __UpperCamelCase ( a__ ):
    """SQL dataset reader: builds a `Sql` dataset builder and materialises it.

    Fixes the obfuscated ``__init__`` whose parameters all shared one name (a
    SyntaxError) and restores ``self.builder``, which the read method below
    depends on.
    """

    def __init__(
        self,
        sql,
        con,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        **kwargs,
    ):
        """
        Args:
            sql: SQL query or table name to read.
            con: connection URI or object for the database.
            features: optional schema to cast the dataset to.
            cache_dir: directory for the download/processing cache.
            keep_in_memory: whether to copy the dataset into memory.
            **kwargs: extra builder keyword arguments.
        """
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def __lowerCamelCase(self):
        """Download/prepare the builder and return the materialised dataset."""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class __UpperCamelCase :
    """SQL dataset writer: writes an Arrow-backed dataset to a SQL table in batches.

    Fixes the obfuscated ``__init__`` whose parameters all shared one name (a
    SyntaxError), restores the instance attributes the methods below read, and
    restores the internal helper names ``_write`` / ``_batch_sql`` which the
    entry point already references.
    """

    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        """
        Args:
            dataset: the dataset to write.
            name: destination SQL table name.
            con: connection URI or object for the database.
            batch_size: rows per write batch (defaults to config.DEFAULT_MAX_BATCH_SIZE).
            num_proc: optional number of worker processes (> 0).
            **to_sql_kwargs: extra keyword arguments forwarded to pandas `to_sql`.
        """
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def __lowerCamelCase(self):
        """Write the whole dataset and return the number of rows written."""
        # `sql`/`con` must not be forwarded to pandas `to_sql`; `index` is
        # passed explicitly so batches share one value.
        _ = self.to_sql_kwargs.pop('sql', None)
        _ = self.to_sql_kwargs.pop('con', None)
        index = self.to_sql_kwargs.pop('index', False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        """Write one batch (offset, index flag, to_sql kwargs); returns rows written."""
        offset, index, to_sql_kwargs = args
        # every batch after the first must append rather than replace the table
        to_sql_kwargs = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs):
        """Write all batches, sequentially or with a multiprocessing pool."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit='ba',
                disable=not logging.is_progress_bar_enabled(),
                desc='Creating SQL from Arrow format',
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for rows_written in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit='ba',
                    disable=not logging.is_progress_bar_enabled(),
                    desc='Creating SQL from Arrow format',
                ):
                    written += rows_written
        return written
| 259 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase ):
    """
    Return A(d): the number of digits of the smallest repunit (1, 11, 111, …)
    divisible by `d`, or 0 when d shares a factor with 10 (in which case no
    repunit is divisible).

    Fixes the body reading an undefined ``divisor`` instead of the parameter.
    """
    divisor = _lowerCamelCase
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        # R(k+1) = 10*R(k) + 1; only the value modulo `divisor` matters
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def lowerCamelCase__ ( _lowerCamelCase = 1000000 ):
    """
    Project Euler 129: return the least n (coprime to 10) for which the
    smallest repunit divisible by n has more than `_lowerCamelCase` digits.

    Fixes the body reading an undefined ``limit`` / ``least_divisible_repunit``
    and passing the limit instead of the candidate divisor to the helper.
    """

    def _least_divisible_repunit(divisor):
        # Local copy of A(d) so this function does not depend on the
        # identically named (shadowed) helper defined above.
        if divisor % 5 == 0 or divisor % 2 == 0:
            return 0
        repunit = 1
        repunit_index = 1
        while repunit:
            repunit = (10 * repunit + 1) % divisor
            repunit_index += 1
        return repunit_index

    limit = _lowerCamelCase
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    # A(n) <= n, so candidates can start just below the limit and walk up
    # through odd numbers until one exceeds it
    while _least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
    # The solver in this module is named `lowerCamelCase__`; the previous
    # `solution` name does not exist here and raised NameError.
    print(F'''{lowerCamelCase__() = }''')
| 259 | 1 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class a__ :
    """Resize images so the shorter edge matches a sampled target length,
    capping the longer edge at ``max_size``.

    Fixes the obfuscated ``__init__`` whose two parameters shared one name (a
    SyntaxError), the nonexistent ``np.uinta`` dtype check, the collapsed
    ``h, w`` / ``newh, neww`` tuple unpacks, and the undefined names left in
    the scaling math.
    """

    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length: (min, max) range the target short edge is sampled from.
            max_size: upper bound for the longer edge after resizing.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                # rescale so the longer edge fits within max_size
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                # uint8 arrays go through PIL for resampling
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1) # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs
class a__ :
    """Bundles resize + normalize + pad preprocessing for FRCNN-style models.

    Fixes the obfuscated ``__call__`` whose two parameters shared one name (a
    SyntaxError), restores the instance attributes the methods below read, and
    restores the ``pad`` helper name that ``__call__`` already references.
    """

    def __init__(self, cfg):
        """Read all preprocessing settings from a detectron2-style config."""
        # NOTE(review): `ResizeShortestEdge` is the upstream name of the resize
        # augmentation class; confirm it is importable/defined in this module.
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        """Zero-pad all (C, H, W) images to a shared max size.

        Returns (stacked padded images, tensor of original (H, W) sizes).
        """
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        """Resize, normalize and pad images.

        Returns (images, sizes, scales_yx); scalars of the first image when
        `single_image` is True.
        """
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _UpperCamelCase ( boxes , scale_yx ):
    """
    Rescale xyxy boxes in place: x coordinates (columns 0 and 2) by the x
    scale, y coordinates (columns 1 and 3) by the y scale; `scale_yx` holds
    (y_scale, x_scale) per row.  Returns the same (mutated) tensor.

    Fixes the duplicate parameter names (a SyntaxError) using the names the
    body already reads, and drops the incorrect ``-> Tuple`` annotation.
    """
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _UpperCamelCase ( tensor , box_size ):
    """
    Clamp xyxy boxes in place to the image bounds: x coordinates to [0, w]
    and y coordinates to [0, h], where `box_size` is (h, w).

    Fixes the duplicate parameter names (a SyntaxError) and restores the
    ``h, w = box_size`` unpack that the four clamp bounds depend on.
    """
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
| 710 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# Root of the git checkout (three levels up from this test file).  The name
# `git_repo_path` is restored because the sys.path.append below reads it; both
# constants were previously bound to the same name `A`, leaving these
# references undefined.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = """    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
"""
class a__ ( unittest.TestCase ):
    """Tests for the `check_copies` repo utility (copy consistency + localized READMEs)."""
    # NOTE(review): every method below is named `a_`, so earlier definitions are
    # shadowed and unittest discovers no setUp/tearDown/test methods; several
    # results are also bound to throwaway locals (`__UpperCAmelCase`) where
    # instance attributes / module state were presumably set, and many call
    # sites read names (UpperCamelCase_, long_class_name, localized_readme,
    # self.check_copy_consistency, REFERENCE_CODE) that are never bound here.
    # Confirm each against the upstream test_check_copies.py before relying on
    # this class.

    def a_ ( self : Optional[Any]):
        """Set up a temp dir mimicking a transformers checkout with a copy of modeling_bert.py."""
        # NOTE(review): `self.transformer_dir` is read on the next line but the
        # mkdtemp result is bound to a local; `UpperCamelCase_` below is
        # undefined (presumably the repo root path).
        __UpperCAmelCase : List[Any] = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , "models/bert/"))
        __UpperCAmelCase : Tuple = self.transformer_dir
        shutil.copy(
            os.path.join(UpperCamelCase_ , "src/transformers/models/bert/modeling_bert.py") , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py") , )

    def a_ ( self : int):
        """Tear down: remove the temporary transformers tree."""
        __UpperCAmelCase : Optional[Any] = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def a_ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str]=None):
        """Write a class under a `# Copied from` comment and check is_copy_consistent agrees.

        NOTE(review): the four parameters share one name (`UpperCamelCase_`),
        which is a SyntaxError — presumably (comment, class_name, class_code,
        overwrite_result) originally; `black.TargetVersion.PYaa` also looks
        like a corrupted version enum member.
        """
        __UpperCAmelCase : str = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            __UpperCAmelCase : str = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
        __UpperCAmelCase : Optional[int] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119)
        __UpperCAmelCase : Dict = black.format_str(UpperCamelCase_ , mode=UpperCamelCase_)
        __UpperCAmelCase : Dict = os.path.join(self.transformer_dir , "new_code.py")
        with open(UpperCamelCase_ , "w" , newline="\n") as f:
            f.write(UpperCamelCase_)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(UpperCamelCase_)) == 0)
        else:
            check_copies.is_copy_consistent(f.name , overwrite=UpperCamelCase_)
            with open(UpperCamelCase_ , "r") as f:
                self.assertTrue(f.read() , UpperCamelCase_)

    def a_ ( self : Tuple):
        """find_code_in_transformers should return the BertLMPredictionHead reference code."""
        __UpperCAmelCase : Any = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(UpperCamelCase_ , UpperCamelCase_)

    def a_ ( self : Optional[int]):
        """Exercise copy consistency: base copy, rename mapping, long names, overwrite."""
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , UpperCamelCase_ , )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , UpperCamelCase_) , )
        # Copy consistency with a really long name
        __UpperCAmelCase : Optional[Any] = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            F"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , F"{long_class_name}LMPredictionHead" , re.sub("Bert" , UpperCamelCase_ , UpperCamelCase_) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , UpperCamelCase_ , overwrite_result=re.sub("Bert" , "TestModel" , UpperCamelCase_) , )

    def a_ ( self : Dict):
        """convert_to_localized_md should sync the localized (zh-hans) model list with README.md."""
        __UpperCAmelCase : List[str] = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        __UpperCAmelCase : str = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
            " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
            " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
            " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
            " Luong, Quoc V. Le, Christopher D. Manning."
        )
        __UpperCAmelCase : List[str] = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        __UpperCAmelCase : str = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
            " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
            " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
            " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
            " Christopher D. Manning 发布。\n"
        )
        __UpperCAmelCase , __UpperCAmelCase : Any = check_copies.convert_to_localized_md(
            UpperCamelCase_ , UpperCamelCase_ , localized_readme["format_model_list"])
        self.assertFalse(UpperCamelCase_)
        self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
        __UpperCAmelCase , __UpperCAmelCase : int = check_copies.convert_to_localized_md(
            UpperCamelCase_ , UpperCamelCase_ , localized_readme["format_model_list"])
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(UpperCamelCase_)
        __UpperCAmelCase : str = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
        )
        __UpperCAmelCase : Optional[int] = (
            "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
            " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        __UpperCAmelCase : int = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        __UpperCAmelCase , __UpperCAmelCase : List[str] = check_copies.convert_to_localized_md(
            UpperCamelCase_ , UpperCamelCase_ , localized_readme["format_model_list"])
        # Check if the model link is synchronized.
        self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
| 487 | 0 |
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
) | 526 |
'''simple docstring'''
def lowerCamelCase__ ( set_a , set_b , alternative_union=False ):
    """
    Compute the Jaccard similarity coefficient between two collections.

    For sets: |A ∩ B| / |A ∪ B|.
    For lists/tuples: order-based intersection and union (duplicates in
    `set_a` are kept).  With ``alternative_union=True`` the denominator is
    ``len(A) + len(B)`` instead of the true union size.

    Returns None when the inputs are neither both sets nor both lists/tuples.

    Fixes the duplicate parameter names (a SyntaxError) using the names the
    body already reads, and drops an unreachable duplicated return statement.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    # Demo: both sample collections were previously bound to the same name
    # `lowerCAmelCase`, leaving `set_a`/`set_b` undefined, and the call used a
    # `jaccard_similarity` name that does not exist in this module.
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
    print(lowerCamelCase__(set_a, set_b))
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
# CPU Stable Diffusion inference with Intel Extension for PyTorch (ipex).
# Every statement below was previously bound to one name (_SCREAMING_SNAKE_CASE)
# while later lines read `args`, `pipe`, `prompt`, etc.; the meaningful names
# those reads require are restored, and the nonexistent `torch.bfloataa`
# attribute is corrected to `torch.bfloat16`.
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 9_99
encoder_hidden_status = torch.randn(2, 77, 7_68)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 6_66
generator = torch.Generator(device).manual_seed(seed)
# presumably the steps flag maps to the pipeline's num_inference_steps kwarg —
# TODO confirm against the diffusers pipeline signature
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 489 |
'''simple docstring'''
# Jump-table configuration for the memoised digit-sum sequence (PE 551).
# All three constants were previously bound to one name while the functions
# below read `ks`, `base` and `memo`.
ks = range(2, 20 + 1)  # digit-lengths for which jumps are cached
base = [10**k for k in range(ks[-1] + 1)]  # powers of ten: base[k] == 10**k
memo = {}  # memo[digitsum(b)][c] -> list of cached (diff, dn, k) jumps
def next_term(a_i, k, i, n):
    """
    Advance the digit-sum sequence, updating the little-endian digit list
    `a_i` in place, from term `i` toward term `n`, using cached jumps on the
    low `k` digits (terms viewed as a(i) = b * 10**k + c).

    Returns (total value added, number of terms jumped).

    Fixes the obfuscated signature whose four parameters shared one name (a
    SyntaxError); the body already reads `a_i`, `k`, `i` and `n`.  The
    function name matches the call sites in `solution` and in the recursion
    below.
    """
    # ds_b = digitsum(b); c = value of the low-order k digits
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)
def compute(a_i: list, k: int, i: int, n: int):
    """Step the sequence a(i+1) = a(i) + digitsum(a(i)) one term at a time.

    `a_i` holds the current term's digits little-endian (a_i[0] = ones digit)
    and is mutated in place.  Stops once `n` terms are reached or a carry
    escapes past digit position `k` (so cached jumps above stay valid).

    Returns (diff, terms_jumped): the total amount added to the term and the
    number of sequence steps taken.
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    diff, ds_b, ds_c = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        # add the digit sum into the low k digits, tracking the carry
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        # a carry reached position k: digits of b change, stop stepping here
        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits: list, k: int, addend: int):
    """Add `addend` into little-endian `digits`, starting at digit position k.

    Mutates `digits` in place, propagating carries and appending new
    high-order digits as needed.
    """
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break

    # any remaining carry becomes new most-significant digits
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(SCREAMING_SNAKE_CASE_: int = 10**15) -> int:
    """Return a(n) for the sequence a(i+1) = a(i) + digitsum(a(i)), a(1) = 1.

    Digits of the current term are kept little-endian; `next_term` jumps over
    many terms at once using its memoized differences.
    """
    n = SCREAMING_SNAKE_CASE_
    digits = [1]  # a(1) = 1, stored little-endian
    i = 1
    dn = 0  # terms jumped so far, relative to i
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    # assemble the integer from its little-endian digits
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 489 | 1 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    """Configuration for the multilingual M-CLIP text encoder.

    NOTE(review): base class reconstructed from the file's imports; upstream
    M-CLIP derives its config from the XLM-R config — confirm against callers.
    """

    model_type = 'M-CLIP'

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        # width of the transformer's output embeddings
        self.transformerDimensions = transformerDimSize
        # dimensionality of the target (shared image/text) embedding space
        self.numDims = imageDimSize
        super().__init__(**kwargs)
class lowerCamelCase__(PreTrainedModel):
    """XLM-RoBERTa text encoder plus a linear projection into the joint
    CLIP embedding space (M-CLIP)."""

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims)

    def forward(self, input_ids, attention_mask):
        """Return (projected mean-pooled embeddings, raw token embeddings)."""
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool token embeddings over non-padded positions
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
| 551 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowerCamelCase__(AbstractDatasetInputStream):
    """Dataset input stream that builds a dataset from a python generator."""

    def __init__(
        self,
        generator,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        gen_kwargs=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        """Build and return the dataset (streaming or map-style)."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
| 551 | 1 |
def A__(number: int, iterations: int) -> str:
    """Play FizzBuzz starting at `number` up to and including `iterations`.

    Returns the space-separated game transcript (with a trailing space).
    Raises ValueError if `number`/`iterations` are not positive integers.
    """
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            'starting number must be\n and integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        # divisible by neither 3 nor 5: emit the number itself
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 652 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
# Module-level logger; unused in the visible portion of this file.
lowercase_ : str = logging.get_logger(__name__)
class lowercase(SegformerImageProcessor):
    """Deprecated alias of `SegformerImageProcessor`, kept for backward
    compatibility; warns on construction and otherwise behaves identically."""

    def __init__(self, *args, **kwargs):
        # Emit the standard deprecation warning before delegating.
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 652 | 1 |
'''simple docstring'''
def partition(m: int) -> int:
    """Count the integer partitions of `m` via bottom-up dynamic programming.

    memo[n][k] accumulates partition counts of n with parts bounded by k
    (per the recurrence below); the answer is memo[m][m - 1].
    """
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
| 131 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (the config class below logs through it).
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class A(PretrainedConfig):
    """Transformer-XL configuration: architecture hyper-parameters, adaptive
    softmax cutoffs and weight-initialization settings."""

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=2_6_7_7_3_5,
        cutoffs=[2_0_0_0_0, 4_0_0_0_0, 2_0_0_0_0_0],
        d_model=1_0_2_4,
        d_embed=1_0_2_4,
        n_head=1_6,
        d_head=6_4,
        d_inner=4_0_9_6,
        div_val=4,
        pre_lnorm=False,
        n_layer=1_8,
        mem_len=1_6_0_0,
        clamp_len=1_0_0_0,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1E-5,
        eos_token_id=0,
        **kwargs,
    ) -> str:
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # share all adaptive-softmax projections but the first when requested
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self) -> Tuple:
        # Message copied from Transformer-XL documentation
        logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value) -> str:
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
| 131 | 1 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()  # root logger; a stdout handler is attached below
def _dump_articles(path: Path, articles: list):
    """Write `articles` joined by newlines to the file at `path`."""
    content = '\n'.join(articles)
    Path(path).open('w').writelines(content)
# Tiny checkpoints used so the end-to-end tests stay fast.
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class a(TestCasePlus):
    """End-to-end tests for run_eval.py / run_eval_search.py on tiny models."""

    def run_eval_tester(self, model):
        """Run run_eval on `model` with a one-article input and check that a
        score file is produced."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / '''utest_input.source'''
        output_file_name = input_file_name.parent / '''utest_output.txt'''
        assert not output_file_name.exists()
        articles = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.''']
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / '''scores.json''')
        task = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
        testargs = F"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()
        with patch.object(sys, '''argv''', testargs):
            run_generate()
            assert Path(score_path).exists()
            # os.remove(Path(output_file_name))

    # quick smoke test on one model; the extensive multi-model runs are @slow
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        """Run the hyper-parameter search wrapper and check its stdout report."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / '''utest_input.source'''
        output_file_name = input_file_name.parent / '''utest_output.txt'''
        assert not output_file_name.exists()
        text = {
            '''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''],
            '''de''': [
                '''Maschinelles Lernen ist großartig, oder?''',
                '''Ich esse gerne Bananen''',
                '''Morgen ist wieder ein toller Tag!''',
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / '''scores.json''')
        reference_path = str(tmp_dir / '''val.target''')
        _dump_articles(input_file_name, text['''en'''])
        _dump_articles(reference_path, text['''de'''])
        task = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
        testargs = F"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''])
        with patch.object(sys, '''argv''', testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [''' num_beams | length_penalty''', model, '''Best score args''']
            un_expected_strings = ['''Info''']
            if "translation" in task:
                expected_strings.append('''bleu''')
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(score_path).exists()
            os.remove(Path(score_path))
| 718 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
# Maps a model family name to its (config, LM-head model, tokenizer) classes.
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    """Validate the combination of CLI arguments for distillation training.

    Raises AssertionError on inconsistent loss weights, model-type pairings
    or missing required files.
    """
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    # every loss weight must be non-negative, and at least one positive
    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    """Freeze the student's positional embeddings (no gradient updates)."""
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    """Freeze the student's token-type (segment) embeddings."""
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    """Parse CLI args, load teacher/student models and data, run distillation."""
    parser = argparse.ArgumentParser(description='Training')
    parser.add_argument('--force', action='store_true', help='Overwrite dump_path if it already exists.')
    parser.add_argument(
        '--dump_path', type=str, required=True, help='The output directory (log, checkpoints, parameters, etc.)')
    parser.add_argument(
        '--data_file', type=str, required=True, help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.', )
    parser.add_argument(
        '--student_type', type=str, choices=['distilbert', 'roberta', 'gpt2'], required=True, help='The student type (DistilBERT, RoBERTa).', )
    parser.add_argument('--student_config', type=str, required=True, help='Path to the student configuration.')
    parser.add_argument(
        '--student_pretrained_weights', default=None, type=str, help='Load student initialization checkpoint.')
    parser.add_argument(
        '--teacher_type', choices=['bert', 'roberta', 'gpt2'], required=True, help='Teacher type (BERT, RoBERTa).')
    parser.add_argument('--teacher_name', type=str, required=True, help='The teacher model.')
    parser.add_argument('--temperature', default=2.0, type=float, help='Temperature for the softmax temperature.')
    parser.add_argument(
        '--alpha_ce', default=0.5, type=float, help='Linear weight for the distillation loss. Must be >=0.')
    parser.add_argument(
        '--alpha_mlm', default=0.0, type=float, help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.', )
    parser.add_argument('--alpha_clm', default=0.5, type=float, help='Linear weight for the CLM loss. Must be >=0.')
    parser.add_argument('--alpha_mse', default=0.0, type=float, help='Linear weight of the MSE loss. Must be >=0.')
    parser.add_argument(
        '--alpha_cos', default=0.0, type=float, help='Linear weight of the cosine embedding loss. Must be >=0.')
    parser.add_argument(
        '--mlm', action='store_true', help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.')
    parser.add_argument(
        '--mlm_mask_prop', default=0.15, type=float, help='Proportion of tokens for which we need to make a prediction.', )
    parser.add_argument('--word_mask', default=0.8, type=float, help='Proportion of tokens to mask out.')
    parser.add_argument('--word_keep', default=0.1, type=float, help='Proportion of tokens to keep.')
    parser.add_argument('--word_rand', default=0.1, type=float, help='Proportion of tokens to randomly replace.')
    parser.add_argument(
        '--mlm_smoothing', default=0.7, type=float, help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).', )
    parser.add_argument('--token_counts', type=str, help='The token counts in the data_file for MLM.')
    parser.add_argument(
        '--restrict_ce_to_mask', action='store_true', help='If true, compute the distillation loss only the [MLM] prediction distribution.', )
    parser.add_argument(
        '--freeze_pos_embs', action='store_true', help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.', )
    parser.add_argument(
        '--freeze_token_type_embds', action='store_true', help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.', )
    parser.add_argument('--n_epoch', type=int, default=3, help='Number of pass on the whole dataset.')
    parser.add_argument('--batch_size', type=int, default=5, help='Batch size (for each process).')
    parser.add_argument(
        '--group_by_size', action='store_false', help='If true, group sequences that have similar length into the same batch. Default is true.', )
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=50, help='Gradient accumulation for larger training batches.', )
    parser.add_argument('--warmup_prop', default=0.05, type=float, help='Linear warmup proportion.')
    parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
    parser.add_argument('--learning_rate', default=5e-4, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--adam_epsilon', default=1e-6, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', default=5.0, type=float, help='Max gradient norm.')
    parser.add_argument('--initializer_range', default=0.02, type=float, help='Random initialization range.')
    parser.add_argument(
        '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit', )
    parser.add_argument(
        '--fp16_opt_level', type=str, default='O1', help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            'See details at https://nvidia.github.io/apex/amp.html'
        ), )
    parser.add_argument('--n_gpu', type=int, default=1, help='Number of GPUs in the node.')
    parser.add_argument('--local_rank', type=int, default=-1, help='Distributed training - Local rank')
    parser.add_argument('--seed', type=int, default=56, help='Random seed')
    parser.add_argument('--log_interval', type=int, default=500, help='Tensorboard logging interval.')
    parser.add_argument('--checkpoint_interval', type=int, default=4000, help='Checkpoint interval.')
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
                    ' itUse `--force` if you want to overwrite it')
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""")

        # SAVE PARAMS #
        logger.info(f"""Param: {args}""")
        with open(os.path.join(args.dump_path, 'parameters.json'), 'w') as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    _, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"""Special tokens {special_tok_ids}""")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"""Loading data from {args.data_file}""")
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""")
        with open(args.token_counts, 'rb') as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info('Data loader created.')

    # STUDENT #
    logger.info(f"""Loading student config from {args.student_config}""")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"""cuda:{args.local_rank}""")
    logger.info('Student loaded.')

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"""cuda:{args.local_rank}""")
    logger.info(f"""Teacher loaded from {args.teacher_name}.""")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher)
    distiller.train()
    logger.info('Let\'s go get some drinks.')


if __name__ == "__main__":
    main()
| 555 | 0 |
'''simple docstring'''
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
# Textual verbosity names (valid values of the DATASETS_VERBOSITY env var)
# mapped to stdlib logging levels.
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    """Return the default log level, honoring the DATASETS_VERBOSITY env var."""
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            # unknown value: warn and fall through to the default
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}")
    return _default_log_level
def _get_library_name() -> str:
    """Return the top-level package name this module lives in."""
    return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
    """Return the library's root logger (named after the package)."""
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    """Set the library root logger's level to the default verbosity."""
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())
def _reset_library_root_logger() -> None:
    """Reset the library root logger's level to NOTSET."""
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)
def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with `name`, defaulting to the library's name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)
def get_verbosity() -> int:
    """Return the current verbosity level of the library root logger."""
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library root logger."""
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    """Set library verbosity to INFO."""
    return set_verbosity(INFO)
def set_verbosity_warning():
    """Set library verbosity to WARNING."""
    return set_verbosity(WARNING)
def set_verbosity_debug():
    """Set library verbosity to DEBUG."""
    return set_verbosity(DEBUG)
def set_verbosity_error():
    """Set library verbosity to ERROR."""
    return set_verbosity(ERROR)
def disable_propagation() -> None:
    """Stop the library's log records from propagating to ancestor loggers."""
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    """Let the library's log records propagate to ancestor loggers.

    If the root logger also has a handler, disable this library's default
    handler first to avoid double logging.
    """
    _get_library_root_logger().propagate = True
# Configure the library root logger once at import time (singleton-like
# module-level initialization).
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy drop-in replacement for `tqdm` that does nothing."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        # keep the iterable (first positional arg) so iteration still works
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, attr):
        # any tqdm method (update, close, ...) becomes a silent no-op
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
# Global flag: progress bars are on unless disable_progress_bar() is called.
_tqdm_active = True
class _tqdm_cls:
    """Factory returning a real tqdm bar, or an `EmptyTqdm` when disabled."""

    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
# Module-level tqdm factory instance used throughout the library.
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return True if tqdm progress bars are currently enabled."""
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bar():
    """Enable tqdm progress bars globally."""
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar():
    """Disable tqdm progress bars globally."""
    global _tqdm_active
    _tqdm_active = False
| 267 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class snake_case (unittest.TestCase ):
lowerCAmelCase__ :Dict = JukeboxTokenizer
lowerCAmelCase__ :List[str] = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
def _a ( self ) -> Dict:
import torch
lowercase__ = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
lowercase__ = tokenizer(**self.metas )["input_ids"]
# fmt: off
lowercase__ = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
@require_torch
def _a ( self ) -> Optional[Any]:
import torch
lowercase__ = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
lowercase__ = tokenizer(**self.metas )["input_ids"]
# fmt: off
lowercase__ = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
| 267 | 1 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the TF MobileBERT family.

    NOTE(review): class, tester and method names restored to match the in-file
    references — `setUp` instantiates `TFMobileBertModelTest.TFMobileBertModelTester`
    and the `test_*` methods call `create_and_check_mobilebert_*` on the tester;
    the previous mangled identifiers made all of these NameErrors. `tf.intaa`
    was also corrected to `tf.int32`.
    """

    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Extend the mixin's input preparation with the pre-training label."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                # The pre-training head additionally needs `next_sentence_label`.
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict

    class TFMobileBertModelTester(object):
        """Builds a tiny MobileBERT config plus random inputs/labels for the tests."""

        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            """Return a config and random ids/masks/labels sized by the tester attributes."""
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            # Exercise dict, list and bare-tensor calling conventions.
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            inputs = [input_ids, input_mask]
            result = model(inputs)
            result = model(input_ids)
            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            # Tile every input to (batch, num_choices, seq) as the head expects.
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published `google/mobilebert-uncased` weights.

    NOTE(review): renamed — the previous class reused the same mangled name as the
    test class above, so the second definition silently shadowed the first.
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        # Golden top-left 3x3 slice of the prediction logits.
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 443 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger and the map of published checkpoint configs.
# NOTE(review): names restored — the previous mangled `UpperCAmelCase_` bindings
# overwrote each other and left `logger` undefined for the rest of the module.
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """
    Configuration class for the Autoformer time-series transformer.

    Stores the time-series settings (prediction/context lengths, lags, static and
    dynamic feature counts), the encoder/decoder transformer hyper-parameters, and
    the Autoformer-specific decomposition settings (`label_length`,
    `moving_average`, `autocorrelation_factor`).

    NOTE(review): the original `__init__` declared every parameter with the same
    mangled name (a SyntaxError) while the body read the real names; the signature
    below restores the names the body uses. The base class is `PretrainedConfig`
    (the only config base imported by this module) and `model_type` /
    `attribute_map` are the attributes `PretrainedConfig` consumes.
    """

    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        # A missing context length defaults to the prediction length.
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic embedding size per categorical feature, capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra input features fed alongside the lagged values."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 443 | 1 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase(array: list[int]) -> list[int]:  # This function is recursive
    """Return a longest non-decreasing subsequence of `array`.

    Fixes over the previous version: the recursion called an undefined name
    (`longest_subsequence`), every local was assigned to `A_` while later lines
    read the intended names, and both length comparisons compared a list's
    length with itself (always False).
    """
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop
    # condition of the recursion).
    if array_length <= 1:
        return array

    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            # Best subsequence that starts at (or after) the first descent.
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = lowerCamelCase(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    # Best subsequence that keeps the pivot as its first element.
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *lowerCamelCase(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
# Run any doctests in this module when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 665 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[np.float64],
    constant_matrix: NDArray[np.float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve ``coefficient_matrix @ x = constant_matrix`` by Jacobi iteration.

    Fixes over the previous version: all four parameters carried the same name
    (a SyntaxError) and every local was assigned to `A_` while later lines read
    the intended names; both are restored here.

    :param coefficient_matrix: strictly diagonally dominant n x n matrix
    :param constant_matrix: n x 1 column of constants
    :param init_val: initial guess, one value per unknown
    :param iterations: number of Jacobi sweeps to perform (>= 1)
    :raises ValueError: on any dimension mismatch, non-positive iteration
        count, or a coefficient matrix that is not strictly diagonally dominant
    """
    rowsa, colsa = coefficient_matrix.shape
    rowsa_c, colsa_c = constant_matrix.shape

    if rowsa != colsa:
        msg = f"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
        raise ValueError(msg)

    if colsa_c != 1:
        msg = f"Constant matrix must be nx1 but received {rowsa_c}x{colsa_c}"
        raise ValueError(msg)

    if rowsa != rowsa_c:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rowsa}x{colsa} and {rowsa_c}x{colsa_c}"
        )
        raise ValueError(msg)

    if len(init_val) != rowsa:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rowsa}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    # Augmented matrix [A | b]; the last column holds the constants.
    table: NDArray[np.float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[np.float64]) -> bool:
    """Check that the coefficient part of the augmented matrix `table` is
    strictly diagonally dominant (|a_ii| > sum of the other entries in row i,
    ignoring the final constants column).

    Returns True on success, raises ValueError otherwise.

    NOTE(review): renamed from a duplicate `lowerCamelCase` definition — the
    solver above calls this function by the name `strictly_diagonally_dominant`.
    """
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        # `cols - 1` skips the constants column of the augmented matrix.
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
# Run any doctests in this module when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 665 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
# Module-wide logging setup plus the model/tokenizer registries used by
# `load_model_tokenizer` and `main` below.
# NOTE(review): names restored — the mangled `_lowercase` bindings left
# `logger`, `model_dict` and `tokenizer_dict` undefined for the functions below.
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)

logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    """Parse the command-line arguments of the BART -> ONNX export script.

    NOTE(review): renamed from `lowercase__` — `main` calls `parse_args()`, and
    the four functions in this file previously all shared one mangled name so
    each definition shadowed the last.
    """
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    """Load the registered model and tokenizer for `model_name` and move the
    model to `device`.

    NOTE(review): the previous signature declared both parameters with the same
    mangled name (a SyntaxError) and assigned every local to `__UpperCAmelCase`
    while the return statement read the intended names; both are restored.
    """
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        # Disable generation features the scripted beam-search export does not support.
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    """Script the BART beam-search generator, export it to ONNX, then check the
    ONNX Runtime output against the PyTorch `generate` output.

    NOTE(review): the previous signature declared all five parameters with one
    mangled name (a SyntaxError); parameter/local names are restored from the
    call sites inside the body.
    """
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1_024, return_tensors="pt").to(model.device)

        # Reference summary produced by the eager PyTorch model.
        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        # Shrink the exported graph by deduplicating initializers before loading it.
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    """Entry point: parse args, load the model, export it to ONNX and validate.

    NOTE(review): renamed from `lowercase__` (the `__main__` guard calls
    `main()`), and the mangled `__UpperCAmelCase` locals are restored to the
    names the body reads.
    """
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
# Script entry point: export the BART model to ONNX and validate the export.
if __name__ == "__main__":
    main()
| 709 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits, classical_bits):
    """Build a small circuit that applies X to qubits 0 and 1, measures them,
    and returns the measurement counts from the Aer simulator.

    NOTE(review): renamed from `lowercase__` — the `__main__` block calls
    `single_qubit_measure(2, 2)` — and the duplicate parameter names (a
    SyntaxError) plus mangled locals are restored from the body's usages.
    """
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
# Script entry point: run the two-qubit measurement and print the counts.
# NOTE(review): the result was previously bound to a mangled name while the
# print statement read `counts`, which was undefined.
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"""Total count for various states are: {counts}""")
| 397 | 0 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    """A second lock on the same file must time out while the first is held.

    NOTE(review): renamed — both tests in this file shared the mangled name
    `lowerCAmelCase_` (so the second shadowed the first and pytest collected
    neither), and the `tmpdir` fixture parameter plus the `locka`/`timeout`
    locals read by the body are restored.
    """
    locka = FileLock(str(tmpdir / "foo.lock"))
    locka_duplicate = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            locka_duplicate.acquire(timeout)
            assert time.time() - _start > timeout
def lowerCAmelCase_(tmpdir):
    """Over-long lock file names must be shortened to a legal (<=255) filename.

    ``tmpdir`` is pytest's temporary-directory fixture (renamed back so pytest
    can inject it). Also restores the lost locals ``filename``/``lock2`` and
    the ``Timeout`` expectation.
    """
    filename = "a" * 1000 + ".lock"

    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    # The over-long name must have been truncated/replaced, not kept verbatim.
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255

    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 616 |
"""simple docstring"""
def lowerCAmelCase_(a: int, n: int, mod: int) -> int:
    """Return ``(a ** n) % mod`` via binary exponentiation (O(log n) multiplies).

    Bug fixes: the three parameters previously shared one name (SyntaxError),
    the recursive call targeted an undefined name, and the even branch used
    float division (``n / 2``) instead of integer division.
    """
    if n == 0:
        return 1
    elif n % 2 == 1:
        # Odd exponent: peel one factor of `a` off and recurse on n - 1.
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        # Even exponent: square the half power. Integer division keeps n an int.
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# Descriptive name; the demo code at the bottom of this module calls it.
binary_exponentiation = lowerCAmelCase_
# a prime number
p = 701

a = 10_0000_0000
b = 10

# Demonstrate modular division via Fermat's little theorem:
# (a / b) % p == (a * b^(p-2)) % p, computed once with the O(log p)
# binary-exponentiation routine above and once with ** directly.
# (Fix: the three constants were all assigned to a single junk name, leaving
# `a`, `b` and `p` undefined; the call target was also undefined.)
print((a / b) % p == (a * lowerCAmelCase_(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 616 | 1 |
def _SCREAMING_SNAKE_CASE(current_set: list[list]) -> list[list]:
    """Perform one Gaussian-elimination pass on an augmented coefficient matrix.

    Each row is normalised by its leading coefficient, subtracted from the
    first row to cancel the leading term, and the routine recurses on the
    sub-matrix (first column stripped) until rows have length 3.

    Bug fixes: the in-place indexed row updates and the local bindings were
    lost to an automated rewrite, and the recursive call targeted an undefined
    name.

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by magnitude of its first term --> creating 'unit' rows.
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude

    # Subtract each later row from the first row to cancel its leading term.
    first_row = duplicate_set[0]
    final_set = [first_row]
    for row in duplicate_set[1:]:
        temp_row = []
        # If first term is 0, it is already in the form we want, so preserve it.
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)

    # Create next recursion iteration set (strip the first column and recurse).
    if len(final_set[0]) != 3:
        reduced_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1:]:
            current_first_column.append(row[0])
            next_iteration.append(row[1:])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, reduced_first_row)
        final_set = resultant

    return final_set


# Intra-file callers refer to this routine as `simplify`; bind that name so
# those references (and the recursion above) resolve to this function.
simplify = _SCREAMING_SNAKE_CASE
def _solve_simplify(current_set: list[list]) -> list[list]:
    """Single elimination pass used by the solver below (kept private so the
    solver is self-contained; the module's standalone elimination routine was
    shadowed by mangled duplicate names)."""
    # Normalise each row by its leading coefficient.
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel the leading term of every later row.
    first_row = duplicate_set[0]
    final_set = [first_row]
    for row in duplicate_set[1:]:
        if row[0] == 0:
            final_set.append(row)
            continue
        final_set.append([first_row[c] - row[c] for c in range(len(row))])
    # Recurse on the sub-matrix until rows are length 3.
    if len(final_set[0]) != 3:
        reduced_first_row = final_set[0]
        first_column = []
        next_iteration = []
        for row in final_set[1:]:
            first_column.append(row[0])
            next_iteration.append(row[1:])
        resultant = _solve_simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, first_column[i])
        resultant.insert(0, reduced_first_row)
        final_set = resultant
    return final_set


def _SCREAMING_SNAKE_CASE(equations: list[list]) -> list:
    """Solve n simultaneous linear equations given as n lists of length n + 1
    (coefficients followed by the constant term). Returns the solutions
    rounded to 5 decimal places, in variable order.

    Raises:
        IndexError: empty input or rows of the wrong length.
        ValueError: non-numeric entries, or no row free of zeros when one is
            needed as the pivot row.

    Bug fixes: the mangled version bound every intermediate result to a single
    junk name, leaving `full_row`, `simplified`, `solutions`, etc. undefined.
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]

    data_set = equations.copy()
    # Move a zero-free row to the front so elimination never divides by zero.
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)

    useable_form = data_set.copy()
    simplified = _solve_simplify(useable_form)
    simplified = simplified[::-1]

    # Back-substitute from the shortest (fully reduced) row upward.
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1:]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)

    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


# Intra-file callers (the __main__ demo) use the descriptive name.
solve_simultaneous = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the equation list was bound to a junk name while the calls below
    # referenced undefined names; junk residue fused into the last line removed.
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(_SCREAMING_SNAKE_CASE(eq))
    print(_SCREAMING_SNAKE_CASE([[4, 2]]))
def _SCREAMING_SNAKE_CASE(pence: int = 200) -> int:
    """Project Euler 31: count the ways to make `pence` pence from standard
    UK coins (1, 2, 5, 10, 20, 50, 100, 200), via bottom-up dynamic programming.

    Bug fixes: the parameter name did not match the `pence` the body reads,
    the inner range started at the (unbound) parameter instead of `coin`, and
    the base case `number_of_ways[0] = 1` had been discarded into a junk name.
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


# Project Euler convention; the __main__ assertion below uses this name.
solution = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # Fix: the call target was undefined and junk residue (`| 94 | 1 |`) had
    # been fused into the assertion, silently changing the compared value.
    assert _SCREAMING_SNAKE_CASE(200) == 73682
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__(ProcessorMixin):
    r"""
    Processor bundling a `Pix2StructImageProcessor` and a T5 tokenizer into a
    single callable that produces model-ready encodings for Pix2Struct.

    Bug fixes: the class previously inherited from itself (a NameError at class
    creation); `ProcessorMixin`, imported above, is the intended base. The
    three mixin attributes and the secondary methods all shared one mangled
    name, so only the last binding of each survived.
    """

    # Attribute names below are the ones ProcessorMixin's machinery requires.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # Pix2Struct does not use token_type_ids; disable them on the tokenizer.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepare ``images`` and/or ``text`` for the model.

        Text-only input is tokenized directly and returned. Image input is
        converted to patch features; for non-VQA checkpoints, accompanying
        text is tokenized and exposed as ``decoder_input_ids`` /
        ``decoder_attention_mask``.

        Raises:
            ValueError: if neither ``images`` nor ``text`` is given.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )

            # Rename tokenizer outputs to the decoder-side names the model
            # expects (these pops were previously discarded into junk names).
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, de-duplicated
        while preserving order."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 405 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes whose docstrings legitimately contain no checkpoint link.
# (Fix: all five module constants were previously assigned to one junk name,
# while the functions below read these descriptive names.)
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}
def _lowerCAmelCase(config_class):
    """Return the first checkpoint name in ``config_class``'s docstring whose
    markdown link points at the matching ``https://huggingface.co/<name>`` URL,
    or None when no such checkpoint is mentioned.

    Bug fixes: the mangled version bound the source text and the findall
    result to a single junk name, leaving ``checkpoints`` undefined.
    """
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


# Descriptive name used by the checker below.
get_checkpoint_from_config_class = _lowerCAmelCase
def _lowerCAmelCase():
    """Raise ValueError if any non-deprecated config class lacks a valid
    checkpoint link in its docstring (unless explicitly allow-listed).

    Bug fixes: ``checkpoint``, ``name`` and ``message`` were all discarded
    into junk names, and the offending class name was never appended.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


# Descriptive name; the __main__ block refers to this checker.
check_config_docstrings_have_checkpoints = _lowerCAmelCase
if __name__ == "__main__":
    # Fix: the original called an undefined name; run the checker defined above.
    _lowerCAmelCase()
| 405 | 1 |
'''simple docstring'''
import random
def __lowerCamelCase(data: list, pivot) -> tuple:
    """Three-way partition of ``data`` around ``pivot``.

    Returns (less, equal, greater) preserving input order within each bucket.

    Bug fixes: the two parameters previously shared one name (a SyntaxError)
    and the three result lists were all unpacked into a single junk name.
    """
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


# Name the quick-select routine below uses for this helper.
_partition = __lowerCamelCase
def __lowerCamelCase(items: list, index: int):
    """Return the ``index``-th smallest element of ``items`` (0-based), or
    None for an out-of-range index, using randomized quick-select.

    Bug fixes: the two parameters previously shared one name (SyntaxError),
    and the partition results/counters were all bound to a single junk name.
    The three-way split is nested here so the routine is self-contained.
    """

    def _split(data: list, pivot) -> tuple:
        # Three-way partition of `data` around `pivot`.
        below, same, above = [], [], []
        for element in data:
            if element < pivot:
                below.append(element)
            elif element > pivot:
                above.append(element)
            else:
                same.append(element)
        return below, same, above

    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _split(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))


# Public name used by the recursion and by callers.
quick_select = __lowerCamelCase
| 324 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE:
    """Undirected weighted graph with Boruvka's minimum-spanning-tree algorithm.

    Bug fixes: all five secondary methods previously shared the name ``A`` (so
    only the last binding survived), and the dict/list indexed assignments in
    ``set_component``, ``union`` and ``boruvka`` had been discarded into junk
    names. Method names are restored to their descriptive forms.
    """

    def __init__(self, num_of_nodes: int) -> None:
        """Create a graph with ``num_of_nodes`` nodes and no edges."""
        self.m_num_of_nodes = num_of_nodes      # number of nodes
        self.m_edges = []                        # edges as [u, v, weight]
        self.m_component = {}                    # node -> component representative

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Add an undirected edge (u_node, v_node) with the given weight."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Follow parent links until the component representative is found."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Refresh every node's representative after a merge."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list, u_node: int, v_node: int) -> None:
        """Merge the components of u_node and v_node (union by size)."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Compute a minimum spanning tree with Boruvka's algorithm, printing
        each added edge and the final total weight."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            # Remember the cheapest edge leaving each component.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # Add every cheapest edge that still connects two components.
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
# NOTE(review): this function body was lost to an automated rewrite — it is a
# no-op (docstring only). Presumably it once held a demo/driver; TODO confirm
# against the upstream module.
def __lowerCamelCase ( ) -> None:
    """No-op placeholder; only the docstring remains of the original body."""
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest
    doctest.testmod()
| 324 | 1 |
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt search: True iff ``pattern`` occurs in ``text``.

    Bug fixes: the two parameters previously shared one name (a SyntaxError)
    and the ``i, j``/``failure`` bindings were lost. The failure-array
    construction is nested here so the routine is self-contained (the module's
    standalone builder was shadowed by a mangled duplicate name).
    """

    def _failure_array(pat: str) -> list:
        # failure[k] = length of the longest proper prefix of pat[:k+1]
        # that is also a suffix of it.
        failure = [0]
        i = 0
        j = 1
        while j < len(pat):
            if pat[i] == pat[j]:
                i += 1
            elif i > 0:
                i = failure[i - 1]
                continue
            j += 1
            failure.append(i)
        return failure

    # 1) Construct the failure array
    failure = _failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


# Name used by the self-test block at the bottom of this module.
kmp = _lowerCAmelCase
def _lowerCAmelCase(pattern: str) -> list:
    """Build the KMP failure array for ``pattern``.

    failure[k] is the length of the longest proper prefix of ``pattern[:k+1]``
    that is also a suffix of it.

    Bug fixes: the ``i``/``j`` counters and the back-jump assignment were all
    bound to a single junk name in the mangled version.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            # Mismatch after at least one match: fall back along the prefix.
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


# Name used by the self-test block at the bottom of this module.
get_failure_array = _lowerCAmelCase
if __name__ == "__main__":
    # Fix: every fixture below was assigned to the same junk name, leaving
    # `pattern`, `text1`, `text2` and `text` undefined for the assertions.
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 673 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_snake_case = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class lowerCAmelCase_(unittest.TestCase):
    """Staging-endpoint tests: push tiny Flax models to the Hub and reload them.

    Bug fixes: all four methods previously shared the mangled name
    ``__lowercase`` (so unittest never collected the tests), ``setUpClass``
    discarded the token instead of storing ``cls._token``, and the reloaded
    models/params were bound to junk names. Method names restored to the
    unittest conventions the decorators imply.
    """

    @classmethod
    def setUpClass(cls):
        """Store the staging token so every test can authenticate."""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        """Best-effort cleanup of the repos created by the tests."""
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        """Round-trip a tiny FlaxBertModel through ``push_to_hub`` and
        ``save_pretrained(push_to_hub=True)`` under the user namespace."""
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        """Same round-trip as above, pushing into an organization namespace."""
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def _a(modela, modelb) -> bool:
    """Return True when two Flax models have (numerically) identical params.

    Any parameter tensor differing by more than 1e-4 in summed absolute
    difference makes the models unequal.

    Bug fixes: the two parameters previously shared one name (SyntaxError),
    both ``flatten_dict`` calls read the same model, and the return annotation
    claimed ``str``.
    """
    models_are_equal = True
    flat_params_a = flatten_dict(modela.params)
    flat_params_b = flatten_dict(modelb.params)
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal


# Descriptive name; the subfolder tests below call this helper.
check_models_equal = _a
@require_flax
class lowerCAmelCase_(unittest.TestCase):
    """Tests for loading Flax models stored in a subfolder, locally and from
    the Hub.

    NOTE(review): this class shares its mangled name with the staging test
    class earlier in the file, which is therefore shadowed at import time.
    Bug fixes here: all four methods previously shared one name (only the last
    survived), locals were bound to junk names, and the expected exception was
    lost — ``OSError`` is assumed for a failed ``from_pretrained``; TODO
    confirm against the transformers test suite.
    """

    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))
            # Loading from the root must fail: the weights live in the subfolder.
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
| 383 | 0 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _a(BertTokenizationTest):
    """DistilBERT tokenization tests — inherits the whole BERT tokenization
    suite and overrides the tokenizer classes.

    Bug fixes: the base class was the undefined ``__A`` (the import above
    provides ``BertTokenizationTest``), the three mixin attributes had mangled
    names the base suite cannot read, and the method locals were lost.
    """

    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        """CLS/SEP insertion for single sequences and sequence pairs."""
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
import numpy as np
__lowerCAmelCase :Dict = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class _a:
def __init__( self ) -> None:
'''simple docstring'''
_snake_case : Optional[Any] = np.array(__snake_case )
def lowercase ( self , __snake_case ) -> np.ndarray:
'''simple docstring'''
_snake_case , _snake_case : List[Any] = np.where(letter == self.SQUARE )
_snake_case : Any = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def lowercase ( self , __snake_case , __snake_case ) -> str:
'''simple docstring'''
_snake_case : int = self.SQUARE[indexa - 1, indexa - 1]
return letter
def lowercase ( self , __snake_case ) -> str:
'''simple docstring'''
_snake_case : Optional[Any] = message.lower()
_snake_case : List[Any] = message.replace(" " , "" )
_snake_case : Any = message.replace("j" , "i" )
_snake_case : Tuple = np.empty((2, len(__snake_case )) )
for letter_index in range(len(__snake_case ) ):
_snake_case : Dict = self.letter_to_numbers(message[letter_index] )
_snake_case : Tuple = numbers[0]
_snake_case : List[Any] = numbers[1]
_snake_case : Any = first_step.reshape(2 * len(__snake_case ) )
_snake_case : Optional[Any] = ""
for numbers_index in range(len(__snake_case ) ):
_snake_case : Optional[int] = int(second_step[numbers_index * 2] )
_snake_case : List[str] = int(second_step[(numbers_index * 2) + 1] )
_snake_case : Any = self.numbers_to_letter(__snake_case , __snake_case )
_snake_case : Any = encoded_message + letter
return encoded_message
def lowercase ( self , __snake_case ) -> str:
'''simple docstring'''
_snake_case : Union[str, Any] = message.lower()
message.replace(" " , "" )
_snake_case : Tuple = np.empty(2 * len(__snake_case ) )
for letter_index in range(len(__snake_case ) ):
_snake_case : Union[str, Any] = self.letter_to_numbers(message[letter_index] )
_snake_case : Tuple = numbers[0]
_snake_case : Any = numbers[1]
_snake_case : Optional[Any] = first_step.reshape((2, len(__snake_case )) )
_snake_case : Union[str, Any] = ""
for numbers_index in range(len(__snake_case ) ):
_snake_case : Dict = int(second_step[0, numbers_index] )
_snake_case : Optional[Any] = int(second_step[1, numbers_index] )
_snake_case : Union[str, Any] = self.numbers_to_letter(__snake_case , __snake_case )
_snake_case : str = decoded_message + letter
return decoded_message | 278 | 1 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class a__ :
    """Helper that builds tiny EfficientFormer configs and inputs for TF tests.

    NOTE(review): this block was mangled by an automated rewrite and does not
    run as-is:
      * every ``__init__`` parameter is named ``_lowerCamelCase`` (duplicate
        argument names are a SyntaxError);
      * most results are bound to the throwaway name ``UpperCamelCase_``
        instead of the attributes/locals the later lines read (``config``,
        ``pixel_values``, ``labels``, ``model``, ``result``, ...);
      * ``__lowercase`` used in the methods below is never defined;
      * all four helper methods share the name ``lowerCamelCase_`` so only the
        last binding survives.
    The intended semantics are documented per method; TODO restore against the
    upstream TFEfficientFormer test file (the exact parameter order of the
    mangled signature cannot be confirmed from this file alone).
    """

    def __init__( self :Any , _lowerCamelCase :Dict , _lowerCamelCase :int = 13 , _lowerCamelCase :int = 64 , _lowerCamelCase :int = 2 , _lowerCamelCase :int = 3 , _lowerCamelCase :int = 3 , _lowerCamelCase :bool = True , _lowerCamelCase :bool = True , _lowerCamelCase :int = 128 , _lowerCamelCase :List[str]=[16, 32, 64, 128] , _lowerCamelCase :int = 7 , _lowerCamelCase :int = 4 , _lowerCamelCase :int = 37 , _lowerCamelCase :str = "gelu" , _lowerCamelCase :float = 0.1 , _lowerCamelCase :float = 0.1 , _lowerCamelCase :int = 10 , _lowerCamelCase :float = 0.02 , _lowerCamelCase :int = 2 , _lowerCamelCase :int = 1 , _lowerCamelCase :int = 128 , _lowerCamelCase :List[int] = [2, 2, 2, 2] , _lowerCamelCase :int = 2 , _lowerCamelCase :int = 2 , ):
        """Record the test hyper-parameters on the tester instance.

        NOTE(review): the right-hand names below (``parent``, ``batch_size``,
        ...) are the intended parameter names; none of them is actually bound
        because the signature was mangled.
        """
        UpperCamelCase_ : Optional[int] =parent
        UpperCamelCase_ : Tuple =batch_size
        UpperCamelCase_ : int =image_size
        UpperCamelCase_ : Optional[int] =patch_size
        UpperCamelCase_ : Any =num_channels
        UpperCamelCase_ : List[str] =is_training
        UpperCamelCase_ : Optional[Any] =use_labels
        UpperCamelCase_ : List[Any] =hidden_size
        UpperCamelCase_ : Optional[int] =num_hidden_layers
        UpperCamelCase_ : Union[str, Any] =num_attention_heads
        UpperCamelCase_ : Union[str, Any] =intermediate_size
        UpperCamelCase_ : List[Any] =hidden_act
        UpperCamelCase_ : Any =hidden_dropout_prob
        UpperCamelCase_ : Dict =attention_probs_dropout_prob
        UpperCamelCase_ : Union[str, Any] =type_sequence_label_size
        UpperCamelCase_ : Dict =initializer_range
        UpperCamelCase_ : str =encoder_stride
        UpperCamelCase_ : List[Any] =num_attention_outputs
        UpperCamelCase_ : str =embed_dim
        UpperCamelCase_ : Tuple =embed_dim + 1
        UpperCamelCase_ : Tuple =resolution
        UpperCamelCase_ : Union[str, Any] =depths
        UpperCamelCase_ : List[Any] =hidden_sizes
        UpperCamelCase_ : Optional[Any] =dim
        UpperCamelCase_ : Union[str, Any] =mlp_expansion_ratio
    def lowerCamelCase_ ( self :str ):
        """Build a (config, pixel_values, labels) triple for one test run.

        NOTE(review): the mangled assignments never bind ``pixel_values``,
        ``labels`` or ``config``, so the return statement cannot resolve them.
        """
        UpperCamelCase_ : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase_ : int =None
        if self.use_labels:
            UpperCamelCase_ : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCamelCase_ : Any =self.get_config()
        return config, pixel_values, labels
    def lowerCamelCase_ ( self :List[Any] ):
        """Build an EfficientFormerConfig from the stored hyper-parameters.

        NOTE(review): ``is_decoder=__lowercase`` references an undefined name.
        """
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def lowerCamelCase_ ( self :Optional[int] , _lowerCamelCase :int , _lowerCamelCase :List[str] , _lowerCamelCase :Optional[Any] ):
        """Run the base TFEfficientFormerModel and check the output shape.

        NOTE(review): ``model``/``result`` are never bound and ``__lowercase``
        is undefined; parameters were presumably (config, pixel_values,
        labels) — TODO confirm.
        """
        UpperCamelCase_ : Tuple =TFEfficientFormerModel(config=__lowercase )
        UpperCamelCase_ : int =model(__lowercase , training=__lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCamelCase_ ( self :Union[str, Any] , _lowerCamelCase :Optional[Any] , _lowerCamelCase :Dict , _lowerCamelCase :Any ):
        """Run image classification (incl. a single-channel variant) and check
        the logits shape.

        NOTE(review): same mangling as above — ``model``/``result`` unbound,
        ``__lowercase`` undefined.
        """
        UpperCamelCase_ : Tuple =self.type_sequence_label_size
        UpperCamelCase_ : Union[str, Any] =TFEfficientFormerForImageClassification(__lowercase )
        UpperCamelCase_ : List[str] =model(__lowercase , labels=__lowercase , training=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        UpperCamelCase_ : Dict =1
        UpperCamelCase_ : Optional[Any] =TFEfficientFormerForImageClassification(__lowercase )
        UpperCamelCase_ : Dict =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCamelCase_ : str =model(__lowercase , labels=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def lowerCamelCase_ ( self :Optional[Any] ):
        """Return (config, inputs_dict) for the common test mixin.

        NOTE(review): the tuple unpacking of ``config_and_inputs`` into
        (config, pixel_values, labels) was destroyed, so ``pixel_values`` and
        ``config`` are unbound below.
        """
        UpperCamelCase_ : Tuple =self.prepare_config_and_inputs()
        UpperCamelCase_ : Union[str, Any] =config_and_inputs
        UpperCamelCase_ : Union[str, Any] ={"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class a__(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model-level tests for the TF EfficientFormer family.

    NOTE(review): identifiers in this block were mangled by an automated
    rewrite -- the class listed the same base twice (``lowercase__``, a
    TypeError at class creation), every attribute rebound one name, every
    method was called ``lowerCamelCase_``, and locals referenced an undefined
    ``__lowercase``.  Canonical names from the transformers TF test mixins are
    restored here; confirm the mixin imports at the top of the file match.
    """

    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    # Mixin features not supported by this architecture.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        # has_text_modality=False: vision model, no tokenizer-related config checks.
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='EfficientFormer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='EfficientFormer does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, 'encoder_seq_length'):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, 'chunk_length') and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                # Fixed typo: was `self.asseretIsInstance`, an AttributeError at runtime.
                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, 'seq_length', None)
                decoder_seq_length = getattr(self.model_tester, 'decoder_seq_length', seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                # The teacher variant computes no loss, so it accepts no labels.
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet')
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, 'seq_length', None)
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', seq_len)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)
        chunk_length = getattr(self.model_tester, 'chunk_length', None)
        if chunk_length is not None and hasattr(self.model_tester, 'num_hashes'):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests.

    Renamed from the mangled ``A_``: the integration tests below call this
    helper as ``prepare_img()``.
    """
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
@require_tf
@require_vision
class a__(unittest.TestCase):
    """Slow end-to-end checks against the released EfficientFormer-L1 checkpoint.

    NOTE(review): method/attribute names restored from mangled code -- the
    tests read ``self.default_image_processor`` while the cached property was
    renamed away, so the original would raise AttributeError.
    """

    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            'snap-research/efficientformer-l1-300')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 357 |
# Optional-dependency guard for the UnCLIP pipelines: the real implementations
# need both PyTorch and transformers >= 4.25.0.  When either is missing, dummy
# placeholder objects are exported instead so that `import` keeps working and
# a helpful error is raised only when the pipelines are actually used.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dummies raise on instantiation; note UnCLIPTextProjModel has no dummy here.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 63 | 0 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
a_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
a_ = 12_80_22
a_ = 12_80_28
@require_sentencepiece
class A_(SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Unit tests for ``MaMaaaTokenizer`` (M2M100-style SentencePiece tokenizer).

    NOTE(review): identifiers in this class were mangled by an automated
    rewrite -- statements rebind ``_lowerCamelCase``/``a_`` repeatedly while
    later lines reference the original names (``A``, ``save_dir``,
    ``tokenizer``, ``vocab_keys``, ...), which are now unbound.  The code is
    preserved byte-for-byte; restore the upstream names before running.
    """

    # presumably: tokenizer_class / rust-tokenizer flag / sentencepiece flags -- TODO confirm
    a_ : Dict = MaMaaaTokenizer
    a_ : Optional[Any] = False
    a_ : str = False
    a_ : int = True

    def _lowerCAmelCase ( self ):
        """Write a tiny vocab + SentencePiece model into tmpdir and round-trip it."""
        super().setUp()
        _lowerCamelCase : Any = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        _lowerCamelCase : Optional[int] = dict(zip(A , range(len(A ) ) ) )
        _lowerCamelCase : str = Path(self.tmpdirname )
        save_json(A , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(A , save_dir / VOCAB_FILES_NAMES['spm_file'] )
        _lowerCamelCase : Optional[int] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def _lowerCAmelCase ( self , **A ):
        """Tokenizer factory used by the common-test mixin."""
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **A )

    def _lowerCAmelCase ( self , A ):
        """Input/output text pair for the mixin's round-trip test."""
        return (
            "This is a test",
            "This is a test",
        )

    def _lowerCAmelCase ( self ):
        # token <-> id conversion for the first vocab entry ('</s>' == 0)
        _lowerCamelCase : int = '</s>'
        _lowerCamelCase : Union[str, Any] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )

    def _lowerCAmelCase ( self ):
        # vocab ordering and total size (base vocab + added tokens)
        _lowerCamelCase : str = self.get_tokenizer()
        _lowerCamelCase : str = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '</s>' )
        self.assertEqual(vocab_keys[1] , '<unk>' )
        self.assertEqual(vocab_keys[-1] , '<s>' )
        self.assertEqual(len(A ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )

    @unittest.skip('Skip this test while all models are still to be uploaded.' )
    def _lowerCAmelCase ( self ):
        pass

    def _lowerCAmelCase ( self ):
        # tokenize -> ids -> tokens -> string round trip on a fixed sentence
        _lowerCamelCase : List[Any] = self.get_tokenizer()
        _lowerCamelCase : Dict = tokenizer.tokenize('This is a test' )
        self.assertListEqual(A , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(A ) , [2, 3, 4, 5, 6] , )
        _lowerCamelCase : int = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(A , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        _lowerCamelCase : str = tokenizer.convert_tokens_to_string(A )
        self.assertEqual(A , 'This is a test' )

    @slow
    def _lowerCAmelCase ( self ):
        # Golden encoding for a pinned revision of facebook/m2m100_418M.
        # fmt: off
        _lowerCamelCase : int = {'input_ids': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
        1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A , model_name='facebook/m2m100_418M' , revision='c168bae485c864188cf9aa0e4108b0b6934dc91e' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_(unittest.TestCase ):
    """Slow integration tests for the ``facebook/m2m100_418M`` tokenizer.

    NOTE(review): identifiers were mangled by an automated rewrite -- class
    attributes all rebind ``a_`` (presumably checkpoint_name / src_text /
    tgt_text / expected_src_tokens) and method locals rebind
    ``_lowerCamelCase`` while later lines reference the original names
    (``vocab``, ``generated_ids``, ``batch``, ``new_tok``, ...).  Code is
    preserved byte-for-byte; restore the upstream names before running.
    """

    a_ : List[str] = """facebook/m2m100_418M"""
    a_ : Union[str, Any] = [
        """In my opinion, there are two levels of response from the French government.""",
        """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
    ]
    a_ : Dict = [
        """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
        """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
    ]
    # fmt: off
    a_ : List[Any] = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]

    @classmethod
    def _lowerCAmelCase ( cls ):
        # One shared tokenizer for the whole test class (en -> fr).
        _lowerCamelCase : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='en' , tgt_lang='fr' )
        _lowerCamelCase : Dict = 1
        return cls

    def _lowerCAmelCase ( self ):
        # Language-code -> id mapping of the pretrained checkpoint.
        self.assertEqual(self.tokenizer.get_lang_id('ar' ) , 12_8006 )
        self.assertEqual(self.tokenizer.get_lang_id('en' ) , 12_8022 )
        self.assertEqual(self.tokenizer.get_lang_id('ro' ) , 12_8076 )
        self.assertEqual(self.tokenizer.get_lang_id('mr' ) , 12_8063 )

    def _lowerCAmelCase ( self ):
        _lowerCamelCase : Optional[Any] = self.tokenizer.get_vocab()
        self.assertEqual(len(A ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab['<unk>'] , 3 )
        self.assertIn(self.tokenizer.get_lang_token('en' ) , A )

    def _lowerCAmelCase ( self ):
        # First source sentence encodes to the pinned golden token ids.
        _lowerCamelCase : Union[str, Any] = 'en'
        _lowerCamelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , A )

    def _lowerCAmelCase ( self ):
        # Decoding should drop language codes when skip_special_tokens is used.
        self.assertIn(A , self.tokenizer.all_special_ids )
        # fmt: off
        _lowerCamelCase : Any = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
        # fmt: on
        _lowerCamelCase : Tuple = self.tokenizer.decode(A , skip_special_tokens=A )
        _lowerCamelCase : Tuple = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A )
        self.assertEqual(A , A )
        self.assertNotIn(self.tokenizer.eos_token , A )

    def _lowerCAmelCase ( self ):
        # save_pretrained / from_pretrained round trip preserves the lang map.
        _lowerCamelCase : Any = tempfile.mkdtemp()
        _lowerCamelCase : Tuple = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(A )
        _lowerCamelCase : Union[str, Any] = MaMaaaTokenizer.from_pretrained(A )
        self.assertDictEqual(new_tok.lang_token_to_id , A )

    @require_torch
    def _lowerCAmelCase ( self ):
        # Parity with fairseq batch layout (see gist link below).
        _lowerCamelCase : Dict = 'en'
        _lowerCamelCase : Dict = 'fr'
        _lowerCamelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors='pt' )
        _lowerCamelCase : str = shift_tokens_right(
            batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
        for k in batch:
            _lowerCamelCase : Union[str, Any] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def _lowerCAmelCase ( self ):
        # Changing tgt_lang updates prefix/suffix special tokens.
        _lowerCamelCase : str = 'mr'
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        _lowerCamelCase : Optional[Any] = 'zh'
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )

    @require_torch
    def _lowerCAmelCase ( self ):
        # Switching between input/target modes swaps the prefix language code.
        _lowerCamelCase : Union[str, Any] = 'mr'
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        _lowerCamelCase : str = 'zh'
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )

    @require_torch
    def _lowerCAmelCase ( self ):
        # Translation inputs carry a forced BOS id for the target language.
        _lowerCamelCase : Dict = self.tokenizer._build_translation_inputs('A test' , return_tensors='pt' , src_lang='en' , tgt_lang='ar' )
        self.assertEqual(
            nested_simplify(A ) , {
                # en_XX, A, test, EOS
                'input_ids': [[12_8022, 58, 4183, 2]],
                'attention_mask': [[1, 1, 1, 1]],
                # ar_AR
                'forced_bos_token_id': 12_8006,
            } , )
| 349 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class A_(SCREAMING_SNAKE_CASE_ ):
    """Configuration for CamemBERT (RoBERTa-style) models.

    Restored from mangled code: the original ``__init__`` declared eighteen
    parameters all named ``A`` (a SyntaxError); the intended names follow
    directly from the attribute assignments in the body.
    """

    model_type = """camembert"""

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A_(SCREAMING_SNAKE_CASE_ ):
    """ONNX export configuration for CamemBERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported graph inputs.

        Restored from mangled code: ``dynamic_axis`` was referenced but bound
        to a different name (NameError).
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 349 | 1 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """Elementwise logistic sigmoid, 1 / (1 + e^-x).

    Restored from mangled code: the parameter was renamed away from
    ``vector`` (NameError), and the helper below calls ``sigmoid`` by name.

    >>> float(sigmoid(np.array([0.0]))[0])
    0.5
    """
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.array) -> np.array:
    """Sigmoid-weighted linear unit (GELU approximation): x * sigmoid(1.702 * x).

    Restored from mangled code: the body referenced undefined ``vector`` and
    ``sigmoid``, and the def shadowed the previous function's name.  The
    sigmoid is inlined so this block is self-contained:
    x * 1/(1 + e^(-1.702 x)) == x / (1 + e^(-1.702 x)).
    """
    return vector / (1 + np.exp(-1.702 * vector))
if __name__ == "__main__":
import doctest
doctest.testmod() | 498 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Fractional knapsack: maximum value packable into capacity ``w``.

    Restored from mangled code: the original declared four parameters all
    named ``a_`` (a SyntaxError) and referenced undefined locals.

    :param vl: item values
    :param wt: item weights (parallel to ``vl``)
    :param w:  knapsack capacity
    :param n:  number of items (len(vl))
    :return:   0 if nothing fits; otherwise the sum of the whole items taken
               (greedy by value/weight ratio) plus the fractional part of the
               first item that does not fully fit.
    """
    # Sort items by value-per-weight ratio, best first.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    # acc[i] = total weight of the first i+1 items; bisect finds how many
    # whole items fit under capacity w.
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 498 | 1 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
a = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """Decorator factory: run the wrapped model call eagerly or as a tf.function.

    Restored from mangled code: both parameters were named ``snake_case__``
    (a SyntaxError) and the function was renamed away from
    ``run_with_tf_optimizations``, which the benchmark methods below use.
    """

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    """Build a (batch_size, sequence_length) tensor of random token ids.

    Restored from mangled code: all three parameters were named
    ``snake_case__`` (a SyntaxError), the function was renamed away from
    ``random_input_ids`` (used by the benchmark methods below), and the dtype
    was garbled to the nonexistent ``tf.intaa`` (should be ``tf.int32``).
    """
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class SCREAMING_SNAKE_CASE__ ( _a ):
_a = 42
_a = 42
_a = "TensorFlow"
@property
def __lowercase ( self : Optional[int] ):
return tf.__version__
def __lowercase ( self : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : int ):
# initialize GPU on separate process
lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
lowerCAmelCase = self._prepare_inference_func(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return self._measure_speed(_inference )
def __lowercase ( self : str , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : int ):
lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
lowerCAmelCase = self._prepare_train_func(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return self._measure_speed(_train )
def __lowercase ( self : str , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : int ):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCAmelCase )
lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
lowerCAmelCase = self._prepare_inference_func(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return self._measure_memory(_inference )
def __lowercase ( self : int , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : int ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCAmelCase )
lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
lowerCAmelCase = self._prepare_train_func(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return self._measure_memory(_train )
def __lowercase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : int ):
lowerCAmelCase = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
lowerCAmelCase = (
hasattr(lowerCAmelCase , """architectures""" )
and isinstance(config.architectures , lowerCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
lowerCAmelCase = getattr(lowerCAmelCase , lowerCAmelCase )
lowerCAmelCase = model_cls(lowerCAmelCase )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](lowerCAmelCase )
# encoder-decoder has vocab size saved differently
lowerCAmelCase = config.vocab_size if hasattr(lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size
lowerCAmelCase = random_input_ids(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(lowerCAmelCase , decoder_input_ids=lowerCAmelCase , training=lowerCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(lowerCAmelCase , training=lowerCAmelCase )
lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def __lowercase ( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : int ):
lowerCAmelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
lowerCAmelCase = (
hasattr(lowerCAmelCase , """architectures""" )
and isinstance(config.architectures , lowerCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
lowerCAmelCase = getattr(lowerCAmelCase , lowerCAmelCase )
lowerCAmelCase = model_cls(lowerCAmelCase )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCAmelCase )
# encoder-decoder has vocab size saved differently
lowerCAmelCase = config.vocab_size if hasattr(lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size
lowerCAmelCase = random_input_ids(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
lowerCAmelCase = model(lowerCAmelCase , decoder_input_ids=lowerCAmelCase , labels=lowerCAmelCase , training=lowerCAmelCase )[0]
lowerCAmelCase = tf.gradients(lowerCAmelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
lowerCAmelCase = model(lowerCAmelCase , labels=lowerCAmelCase , training=lowerCAmelCase )[0]
lowerCAmelCase = tf.gradients(lowerCAmelCase , model.trainable_variables )
return gradients
lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def __lowercase ( self : Optional[int] , lowerCAmelCase : List[str] ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(lowerCAmelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
lowerCAmelCase = timeit.repeat(
lowerCAmelCase , repeat=self.args.repeat , number=10 , )
return min(lowerCAmelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
def __lowercase ( self : Optional[Any] , func : Callable[[], None] ):
    """Measure the peak memory used by the zero-arg closure `func`.

    Returns a `(memory, summary)` pair: `memory` is a `Memory` object, "N/A", or
    the line-by-line total; `summary` is the line-by-line trace summary when
    `args.trace_memory_line_by_line` is enabled, otherwise None.
    """
    logger.info(
        """Note that TensorFlow allocates more memory than """
        """it might need to speed up computation. """
        """The memory reported here corresponds to the memory """
        """reported by `nvidia-smi`, which can vary depending """
        """on total available memory on the GPU that is used.""" )
    with self.args.strategy.scope():
        try:
            if self.args.trace_memory_line_by_line:
                # Line-by-line tracing only works in eager mode.
                if not self.args.eager_mode:
                    raise ValueError(
                        """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
                        """ consumption line by line.""" )
                trace = start_memory_tracing("""transformers""" )
            if self.args.is_tpu:
                # tpu
                raise NotImplementedError(
                    """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
                    """ with `args.memory=False`""" )
            elif self.args.is_gpu:
                # gpu
                if not is_pyanvml_available():
                    logger.warning(
                        """py3nvml not installed, we won't log GPU memory usage. """
                        """Install py3nvml (pip install py3nvml) to log information about GPU.""" )
                    memory = """N/A"""
                else:
                    logger.info(
                        """Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
                        """ running on the same GPU.""" )
                    # init nvml
                    nvml.nvmlInit()
                    func()
                    handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                    meminfo = nvml.nvmlDeviceGetMemoryInfo(handle )
                    max_bytes_in_use = meminfo.used
                    memory = Memory(max_bytes_in_use )
                    # shutdown nvml
                    nvml.nvmlShutdown()
            else:
                # cpu
                if self.args.trace_memory_line_by_line:
                    logger.info(
                        """When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
                        """ TensorFlow.""" )
                    memory = None
                else:
                    memory_bytes = measure_peak_memory_cpu(func )
                    memory = Memory(memory_bytes ) if isinstance(memory_bytes , int ) else memory_bytes
            if self.args.trace_memory_line_by_line:
                summary = stop_memory_tracing(trace )
                if memory is None:
                    # CPU tracing path: report the traced total as the peak memory.
                    memory = summary.total
            else:
                summary = None
            return memory, summary
        except ResourceExhaustedError as e:
            self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
            return "N/A", None
# --- (corrupted join artifact removed) ---
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    """Builds a tiny ViTMSN config plus dummy inputs and runs shape checks for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last_hidden_state shape."""
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check the classification head, including the greyscale (1-channel) case."""
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""")
        print(f"""Labels: {labels}""")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the common tests, as ViTMSN does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""ViTMSN does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test below."""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the pretrained facebook/vit-msn-small checkpoint."""

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
# --- (corrupted join artifact removed) ---
'''simple docstring'''
from collections import defaultdict

# Shared state for the even-tree computation; populated by the driver below
# (or by a caller) before `dfs` is invoked.
tree = defaultdict(list)
visited = {}
cuts = []


def dfs(start):
    """Return the size of the subtree rooted at `start`.

    Marks nodes in `visited` and records in `cuts` every node whose subtree has
    an even number of nodes (the edge above such a node can be cut).
    """
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """Run the traversal from the root (node 1)."""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    # The root's own "cut" is not a removable edge, hence the -1.
    print(len(cuts) - 1)
# --- (corrupted join artifact removed) ---
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
# Metric metadata consumed by the `Rouge` metric class below.
_CITATION = '''\
@inproceedings{lin-2004-rouge,
    title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
    author = "Lin, Chin-Yew",
    booktitle = "Text Summarization Branches Out",
    month = jul,
    year = "2004",
    address = "Barcelona, Spain",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W04-1013",
    pages = "74--81",
}
'''

_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.

Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.

This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''

_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
    predictions: list of predictions to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
    rouge_types: A list of rouge types to calculate.
        Valid names:
        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
        `"rougeL"`: Longest common subsequence based scoring.
        `"rougeLSum"`: rougeLsum splits text using `"\n"`.
        See details in https://github.com/huggingface/datasets/issues/617
    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
    use_aggregator: Return aggregates if this is set to True
Returns:
    rouge1: rouge_1 (precision, recall, f1),
    rouge2: rouge_2 (precision, recall, f1),
    rougeL: rouge_l (precision, recall, f1),
    rougeLsum: rouge_lsum (precision, recall, f1)
Examples:

    >>> rouge = datasets.load_metric(\'rouge\')
    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> results = rouge.compute(predictions=predictions, references=references)
    >>> print(list(results.keys()))
    [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
    >>> print(results["rouge1"])
    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
    >>> print(results["rouge1"].mid.fmeasure)
    1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    """ROUGE metric, wrapping Google Research's `rouge_score` implementation."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/ROUGE_(metric)',
                'https://github.com/google-research/google-research/tree/master/rouge',
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        """Score each (reference, prediction) pair; aggregate with bootstrap resampling if requested."""
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            # Transpose per-pair scores into {rouge_type: [scores...]}.
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the Kandinsky 2.2 controlnet pipeline with tiny dummy components."""

    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    # NOTE(review): "guidance_scale" and "return_dict" appear twice in the original
    # list; preserved as-is.
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the real Kandinsky 2.2 controlnet checkpoints."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        # Convert the PIL depth hint to a (1, C, H, W) float tensor in [0, 1].
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
# --- (corrupted join artifact removed) ---
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
    """Builds a tiny SqueezeBert config plus dummy inputs and runs shape checks for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # NOTE(review): `attention_probs_dropout_prob` is fed from
        # `hidden_dropout_prob` (and vice versa) in the original code — preserved
        # as-is; confirm whether the swap is intentional.
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand inputs to (batch, num_choices, seq_len) for the multiple-choice head.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for the SqueezeBert family."""

    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    """Integration test: run the pretrained SqueezeBERT MNLI head on fixed input ids."""

    @slow
    def test_inference_classification_head(self) -> None:
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
        input_ids = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        # Reference logits recorded from the original implementation.
        expected_tensor = torch.tensor([[0.64_01, -0.03_49, -0.60_41]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1E-4))
| 589 | 0 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase__ = f"""Input value of [number={number}] must be an integer"""
raise TypeError(lowerCamelCase__ )
if number < 1:
raise ValueError("""Input must be a positive integer""" )
return -1 if len(prime_factors(lowerCamelCase__ ) ) % 2 else 1
if __name__ == "__main__":
    # Run the module's embedded doctests when executed as a script.
    import doctest

    doctest.testmod()
| 644 | """simple docstring"""
def reverse_long_words(sentence: str) -> str:
    """Return ``sentence`` with every word longer than four characters reversed.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    # Fixes: (1) def renamed to match the call in the __main__ demo below;
    # (2) the original compared len(<whole sentence>) instead of len(word),
    # so short words were reversed too; (3) dropped the redundant "".join
    # around word[::-1] (slicing a str already yields a str).
    return " ".join(word[::-1] if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo: words longer than four characters come back reversed.
    print(reverse_long_words("Hey wollef sroirraw"))
| 644 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import scaffolding (standard transformers pattern): heavy submodules are
# only imported on first attribute access via _LazyModule.
# Fix: the original assigned the structure dict and the per-backend lists to one
# repeatedly-overwritten name and then referenced the never-defined
# `_import_structure` in the _LazyModule call.
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    """Bundles a CLAP feature extractor and a RoBERTa tokenizer into one processor."""

    # Attribute names required by ProcessorMixin.
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        # Fix: the original declared duplicate parameter names (SyntaxError).
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or extract audio features from ``audios``.

        Returns a BatchEncoding holding the tokenizer output, the audio
        features, or both (audio features merged under "input_features").
        """
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            # Fix: the original dropped the features into a throwaway local
            # instead of merging them into the returned encoding.
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, preserving order, no duplicates.
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 73 | 0 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    """Generate a 1024-bit RSA key pair and write it to 'rsa_*.txt' files."""
    # Renamed from a mangled identifier: the __main__ guard below calls main().
    print('Making key files...')
    make_key_files('rsa', 10_24)
    print('Key files generation successful.')
def generate_key(key_size: int):
    """Create an RSA key pair of ``key_size`` bits.

    Returns:
        ((n, e), (n, d)): the public and private keys.
    """
    # Renamed from a mangled identifier: make_key_files below calls generate_key().
    print('Generating prime p...')
    p = rabinMiller.generate_large_prime(key_size)
    print('Generating prime q...')
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print('Generating e that is relatively prime to (p - 1) * (q - 1)...')
    while True:
        # Draw candidates until one is coprime with phi(n) = (p-1)(q-1).
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print('Calculating d that is mod inverse of e...')
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files(name: str, key_size: int) -> None:
    """Write <name>_pubkey.txt / <name>_privkey.txt, refusing to overwrite."""
    # Renamed from a mangled identifier: main() above calls make_key_files().
    if os.path.exists(F'{name}_pubkey.txt') or os.path.exists(F'{name}_privkey.txt'):
        print('\nWARNING:')
        print(
            F'\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n'
            'Use a different name or delete these files and re-run this program.')
        sys.exit()

    # Fix: the original assigned the (public, private) tuple to one name and
    # then referenced undefined public_key/private_key variables.
    public_key, private_key = generate_key(key_size)
    print(F'\nWriting public key to file {name}_pubkey.txt...')
    with open(F'{name}_pubkey.txt', 'w') as out_file:
        out_file.write(F'{key_size},{public_key[0]},{public_key[1]}')
    print(F'Writing private key to file {name}_privkey.txt...')
    with open(F'{name}_privkey.txt', 'w') as out_file:
        out_file.write(F'{key_size},{private_key[0]},{private_key[1]}')
if __name__ == "__main__":
    # Entry point: generate the 'rsa' key pair on disk.
    main()
| 280 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling uniform points in [-1, 1]^2 and measuring the
    fraction that falls inside the unit circle. Prints estimate, reference
    value, and absolute error.
    """
    # Renamed from a mangled `_A` that was shadowed by later defs of the same name.

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance greater than 1 is outside.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle.
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(
    fnc: Callable[[float], float],
    iterations: int,
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte-Carlo estimate of the integral of ``fnc`` over [min_value, max_value].

    Averages ``fnc`` at uniform random points and scales by the interval width.
    """
    # Fixes: duplicate mangled parameter names (SyntaxError) and the name
    # `area_under_curve_estimator` that later functions call but was undefined.
    return mean(
        fnc(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Check the MC estimate of the area under y=x against the closed form and print both."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(identity_function, iterations, min_value, max_value)
    # Closed form: integral of x dx from a to b is (b^2 - a^2) / 2.
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under sqrt(4 - x^2) on [0, 2] (quarter circle, r=2)."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(function_to_integrate, iterations, 0.0, 2.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
    # Run the module's embedded doctests when executed as a script.
    import doctest

    doctest.testmod()
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list:
    """Shuffle ``data`` in place with the Fisher-Yates algorithm and return it.

    Each of the len(data)! permutations is equally likely (given a fair RNG).
    """
    # Fixes: (1) renamed to match the calls in the __main__ demo below;
    # (2) the original swapped random index pairs, which is not Fisher-Yates
    # and produces a biased shuffle. Proper F-Y walks from the end, swapping
    # position i with a uniformly chosen position j in [0, i].
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # randint bounds are inclusive
        data[i], data[j] = data[j], data[i]
    return data
return data
if __name__ == "__main__":
    # Demo: shuffle a list of ints and a list of strings in place.
    # Fix: the originals were bound to mangled names while the prints below
    # referenced the undefined `integers` / `strings`.
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 577 |
def UpperCAmelCase__(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve {a1*x + b1*y = c1 ; a2*x + b2*y = c2} by Cramer's rule.

    Each equation is given as [a, b, c]. Returns (x, y).

    Raises:
        ValueError: for malformed input, inconsistent systems, or systems
            with infinitely many solutions.
    """
    # Fix: the original declared both parameters with the same mangled name
    # (a SyntaxError) and reused one name throughout the body.
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('Please enter a valid equation.')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.')

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)')
        else:
            raise ValueError('No solution. (Inconsistent system)')
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
| 577 | 1 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    """Builds tiny configs and random trajectory inputs for DecisionTransformer tests.

    Renamed from a mangled `A_`: the test class below instantiates
    DecisionTransformerModelTester. Method names restored from their call sites
    (the originals all shared one mangled name and shadowed each other).
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        seq_length=7,
        act_dim=6,
        state_dim=1_7,
        hidden_size=2_3,
        max_length=1_1,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Random tensors shaped like one batch of trajectory data."""
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_0_0_0)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            # seq length * 3 as there are 3 modalities: states, returns and actions
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )

    def prepare_config_and_inputs_for_common(self):
        # Fix: the original unpacked all seven values into one repeated mangled
        # name and then built the dict from undefined variables.
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            'states': states,
            'actions': actions,
            'rewards': rewards,
            'returns_to_go': returns_to_go,
            'timesteps': timesteps,
            'attention_mask': attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for DecisionTransformerModel.

    Mixin bases and attribute names restored: the originals referenced
    undefined mangled names and assigned every flag to one shadowed attribute.
    """

    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                'states',
                'actions',
                'rewards',
                'returns_to_go',
                'timesteps',
                'attention_mask',
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """Autoregressively predict state/action/return for two environment steps
        with a pretrained hopper-expert checkpoint, checking the action
        predictions against recorded reference values.

        Fixes mangled multi-assignments that left action_preds / state / reward
        undefined.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 1_0  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert')
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        # Reference action predictions for the two steps.
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device)

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_preds, action_preds, return_preds = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_preds.shape, actions.shape)
            self.assertTrue(torch.allclose(action_preds[0, -1], expected_outputs[step], atol=1E-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_preds[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1)
| 652 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    """Scheduler tests for UnCLIPScheduler.

    Restores the `scheduler_classes` attribute and method names (the originals
    referenced `self.scheduler_classes` / `self.get_scheduler_config` that the
    mangled names never defined, and all test methods shadowed one name).
    """

    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        # Fix: the original assigned the dict to a throwaway name and then
        # used the undefined `config`.
        config = {
            'num_train_timesteps': 1_0_0_0,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 1_0, 2_0]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 5_0_0, 9_9_9]:
            for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='fixed_small_log')
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.00_00E-10)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7) - 0.0549625)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9) - 0.9994987)) < 1E-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='learned_range')
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1E-5
        assert scheduler._get_variance(4_8_7, predicted_variance=predicted_variance) - -5.7998052 < 1E-5
        assert scheduler._get_variance(9_9_9, predicted_variance=predicted_variance) - -0.0010011 < 1E-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1E-2
        assert abs(result_mean.item() - 0.3284743) < 1E-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(2_5)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1E-2
        assert abs(result_mean.item() - 0.3362038) < 1E-3

    def test_trained_betas(self):
        # NOTE(review): intentionally a no-op in the original; name assumed.
        pass

    def test_add_noise_device(self):
        # NOTE(review): intentionally a no-op in the original; name assumed.
        pass
| 652 | 1 |
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """True iff some permutation of ``input_str`` (spaces removed, case-folded)
    is a palindrome, i.e. at most one character has an odd count.
    """
    # Renamed from a shadowed mangled name; the benchmark's timeit statements
    # and the __main__ block call it by this name.
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2
def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Manual-count version of the palindrome-permutation check (no Counter)."""
    # Renamed from a shadowed mangled name (see benchmark's timeit statements).
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    # A palindrome permutation allows at most one odd-count character.
    if odd_char > 1:
        return False
    return True
def benchmark(input_str: str = "") -> None:
    """Print the answer and timing of both palindrome-check implementations."""
    # Renamed from a shadowed mangled name; the __main__ block calls benchmark().
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
if __name__ == "__main__":
    # Fix: the original bound input()/the result to mangled names while the
    # timeit setup and the final print referenced `check_str` and `status`.
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 196 |
import numpy as np
def _UpperCAmelCase (UpperCamelCase_ : np.array ):
'''simple docstring'''
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
    # Run the module's embedded doctests when executed as a script.
    import doctest

    doctest.testmod()
| 196 | 1 |
import sys
def matrix_chain_order(array):
    """Dynamic-programming matrix-chain multiplication (CLRS 15.2).

    ``array`` holds the matrix dimensions: matrix i is array[i-1] x array[i].
    Returns (matrix, sol): minimal scalar-multiplication counts and the split
    indices used to reconstruct the optimal parenthesization.
    """
    # Renamed from a shadowed `_A`: main() below calls matrix_chain_order().
    # Fix: costs/splits were written to throwaway names and the return
    # referenced undefined `matrix`/`sol`.
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optiomal_solution(optimal_solution, i, j):
    """Recursively print the optimal parenthesization for matrices i..j.

    (Name kept as in the recursive call sites, including the original typo.)
    """
    # Fix: the original declared all three parameters with one mangled name
    # (a SyntaxError) while the body used optimal_solution / i / j.
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optiomal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optiomal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")
def main():
    """Worked CLRS example: six matrices; print the cost and parenthesization."""
    # Renamed from a shadowed `_A`: the __main__ guard below calls main().
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, sol = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optiomal_solution(sol, 1, n - 1)
if __name__ == "__main__":
    # Run the worked matrix-chain example.
    main()
| 693 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity (moles / volume) times the n-factor, rounded.

    >>> molarity_to_normality(2, 4, 8)
    1
    """
    # Renamed from a shadowed `_A`; name/parameter order assumed from the
    # formula (normality = M * n-factor) — confirm against callers.
    return round(float(moles / volume) * nfactor)
def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal-gas pressure: P = nRT / V with R = 0.0821 L*atm/(mol*K), rounded.

    >>> moles_to_pressure(0.82, 3, 300)
    90
    """
    # Fix: the original declared duplicate mangled parameter names
    # (a SyntaxError); order assumed (volume, moles, temperature) — confirm.
    return round(float((moles * 0.0821 * temperature) / (volume)))
def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal-gas volume: V = nRT / P with R = 0.0821 L*atm/(mol*K), rounded.

    >>> moles_to_volume(0.82, 3, 300)
    90
    """
    # Fix: duplicate mangled parameter names (SyntaxError); order assumed
    # (pressure, moles, temperature) — confirm against callers.
    return round(float((moles * 0.0821 * temperature) / (pressure)))
def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal-gas temperature: T = PV / (nR) with R = 0.0821 L*atm/(mol*K), rounded.

    >>> pressure_and_volume_to_temperature(0.82, 1, 2)
    20
    """
    # Fix: duplicate mangled parameter names (SyntaxError); order assumed
    # (pressure, moles, volume) — confirm against callers.
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
    # Run the module's embedded doctests when executed as a script.
    import doctest

    doctest.testmod()
| 693 | 1 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
# Issues carrying any of these labels are never auto-closed or auto-staled.
# Fix: the list was bound to a mangled name while main() reads LABELS_TO_EXEMPT.
LABELS_TO_EXEMPT = [
    'good first issue',
    'feature request',
    'wip',
]
def main() -> None:
    """Close or stale-comment inactive open issues in huggingface/accelerate.

    Fixes: renamed to `main` (the __main__ guard calls main()); the sort
    lambda's parameter didn't match its body; `reverse=` was an undefined name.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored.")
if __name__ == "__main__":
    # Entry point for the scheduled stale-issue job.
    main()
| 707 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    """Builds tiny ViTMAE configs/inputs for the model tests below.

    The mangled original assigned every constructor argument to a throwaway
    local; they must be stored on ``self`` because the test methods read
    ``self.batch_size``, ``self.seq_length`` etc. (see ``create_and_check_model``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) suitable for a forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a small ViTMAEConfig from the tester's hyperparameters."""
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's output shape against the masked sequence length."""
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        """Check the pretraining head's logits shape, including the greyscale case."""
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the (config, inputs_dict) contract."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE
    does not use input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats test fixture used by the integration test.

    Restored name: the integration test below calls ``prepare_img()``, but the
    mangled original defined ``SCREAMING_SNAKE_CASE_`` instead.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the released facebook/vit-mae-base weights."""

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 631 | 0 |
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries from ``state_dict`` in place.

    The mangled original iterated an undefined ``ignore_keys`` name and popped
    the dict itself twice; the intended behavior (grounded by the callers in
    ``shard_on_the_fly``) is to pop each listed key, ignoring missing ones.
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # `None` default: absent keys are simply skipped.
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build an ``nn.Linear`` (no bias) that shares its weight with ``emb``.

    Used to tie an output projection to an embedding table: the returned layer's
    weight tensor is the embedding's weight tensor, not a copy.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share storage with the embedding weight (tied weights).
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    """Return a new dict with fairseq NLLB-MoE parameter names mapped to HF names.

    ``expert_idx`` is the rank this expert shard came from; when given, the
    single ``experts.0`` entry of the shard is renamed to ``expert_{expert_idx}``.
    """
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # NOTE: the original wrote `if "fc2" and "experts" not in key:` — the literal
        # "fc2" is always truthy, so the effective test was just `"experts" not in key`.
        # Since str.replace is a no-op when ".fc2." is absent, adding the explicit
        # membership test is behavior-preserving and states the intent.
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    """Convert per-rank fairseq NLLB-MoE checkpoints into HF-style weight shards.

    Loads each ``{switch_checkpoint_path}-rank-{i}.pt`` expert file plus the
    ``-shared.pt`` file, renames keys, writes one shard per expert (plus one for
    the shared weights) under ``dump_path``, and returns ``(metadata, index)``
    matching the HF sharded-checkpoint index format.

    NOTE: ``dtype`` is accepted for CLI compatibility but not used here —
    tensors are saved in their original dtype.
    """
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            # Shard count is unknown until the end, so use a "???" placeholder
            # that gets renamed once all shards are written.
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()

    # Shard the raw fairseq checkpoint into HF-format weight files.
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    # Round-trip through from_pretrained to validate that the shards load.
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 571 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__snake_case : List[Any] = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    """Deprecated alias kept for backward compatibility.

    The class name and base are grounded by the warning text below; the mangled
    original subclassed an undefined name ``a`` and passed an undefined warning
    category.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 571 | 1 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
__A : int = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    """Deprecated alias kept for backward compatibility.

    The class name and base are grounded by the warning text below; the mangled
    original subclassed an undefined name and passed an undefined warning category.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 75 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Optional[Any] = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class DetaConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a DETA model. Instantiating a
    configuration with the defaults yields a configuration similar to the `ut/deta`
    architecture (see the checkpoint map above).
    """

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            # Allow passing a plain dict; rebuild the proper backbone config class from it.
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 75 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """Create (or attach to ``subparsers``) the argument parser for `accelerate test`.

    Returns the parser; when used as a subcommand, ``func`` defaults to
    ``test_command`` so the CLI dispatcher can call it.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    """Run the accelerate sanity-check script via `accelerate-launch`.

    Builds the launch command from the bundled ``test_script.py`` (two directories
    up from this file), optionally forwarding ``--config_file``, and prints a
    success message when the subprocess exits cleanly.
    """
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    """CLI entry point: parse arguments and run the test command."""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
    # Script entry point for `python test.py` / the `accelerate test` command.
    main()
| 103 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    """Speech-to-image pipeline: transcribe audio with Whisper, then run Stable Diffusion
    on the transcription.

    The mangled original used duplicate ``a__`` parameter names (a SyntaxError) and
    clobbered every local; names below are restored from the imported module types and
    the surrounding call structure.
    """

    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Enable sliced attention computation to save memory; "auto" halves the head dim."""
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Disable sliced attention (compute attention in one step)."""
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Transcribe the audio into a text prompt with Whisper.
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        # 0.18215 is the VAE scaling factor used by Stable Diffusion.
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 211 | 0 |
"""simple docstring"""
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], xa: float, xa_next: float) -> float:
    """Find a real root of *function* with the secant method.

    Args:
        function: the function whose root is sought.
        xa: first initial guess.
        xa_next: second initial guess (must differ from ``xa``).

    Returns:
        An approximation of the root, iterated until two successive
        estimates differ by less than ``10**-5``.

    Raises:
        ZeroDivisionError: if two successive points (or their function
            values) coincide, making the secant slope undefined.
    """
    x_n: float = xa
    x_n1: float = xa_next
    while True:
        # A zero-length secant (equal points or equal values) has no usable slope.
        if x_n == x_n1 or function(x_n) == function(x_n1):
            raise ZeroDivisionError("float division by zero, could not find root")
        # Secant step: x_{n+2} = x_{n+1} - f(x_{n+1}) / slope of the secant line.
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float) -> float:
    """Demo polynomial for the secant method: f(x) = x^3 - 2x - 5."""
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
    # Demo: locate the real root of f(x) = x^3 - 2x - 5 starting from 3 and 3.5.
    # NOTE(review): `intersection` and `f` are not defined under those names in
    # this module as written -- both functions above are currently named
    # `_lowerCAmelCase` (the second shadows the first), so running this script
    # raises NameError until the definitions are restored.
    print(intersection(f, 3, 3.5))
| 716 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase(unittest.TestCase):
    """Integration test: TF Camembert produces the reference hidden states."""

    @slow
    def test_output_embeds_base_model(self):
        # Renamed from a mangled, name-mangled identifier: unittest only runs
        # methods prefixed with `test_`, so the original method never executed.
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,  # was the nonexistent `tf.intaa`
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,  # was the nonexistent `tf.floataa`
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 295 | 0 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class UpperCAmelCase_(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Mixin-driven model tests for `VQModel`.

    Restored from mangled code: the original listed the same (undefined) base
    twice -- a TypeError -- and named every method `A__`, so the mixin hooks
    (`dummy_input`, `prepare_init_args_and_inputs_for_common`, ...) and the
    `test_*` methods could never be found.
    """

    # Attributes the ModelTesterMixin contract reads.
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        """A minimal random batch shaped for VQModel's `sample` input."""
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        """Constructor kwargs for a tiny VQModel plus matching inputs."""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        # Not applicable to VQModel -- intentionally skipped.
        pass

    def test_training(self):
        # Not applicable to VQModel -- intentionally skipped.
        pass

    def test_from_pretrained_hub(self):
        """Loading from the Hub reports no missing keys and runs forward."""
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        """A seeded random input reproduces the reference output slice."""
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 94 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
# Module logger; the trainer class below calls `logger.warning(...)`.
logger = logging.get_logger(__name__)

# Maps the --lr_scheduler CLI choice to its schedule factory; consumed by
# `_get_lr_scheduler` in the trainer below. The original bound both this dict
# and the logger to the same mangled name, so the second assignment clobbered
# the first and every later reference raised NameError.
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class UpperCAmelCase_(Trainer):
    """Seq2seq `Trainer` subclass with label smoothing, sortish sampling and
    generate-based evaluation.

    Restored from mangled code in which every attribute assignment had been
    redirected into a throwaway local (e.g. `self.config`, `self.loss_fn`,
    `self.optimizer` were never actually set) and all methods shared one name.
    """

    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        # FSMT keeps separate source/target vocabularies; use the target one.
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """Build optimizer (AdamW or Adafactor) and LR scheduler if unset."""
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            # Weight decay applies to everything except biases and LayerNorm weights.
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                # Fairscale's OSS shards optimizer state across workers.
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps: int):
        """Instantiate the scheduler selected by ``--lr_scheduler``."""
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        """Pick the training sampler (TPU / sortish / random / distributed)."""
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        """Return (loss, logits) honoring pad-token masking / label smoothing."""
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """Evaluation step: optionally generate, then compute loss on labels."""
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        """Right-pad a (batch, seq) tensor with the pad (or EOS) token id."""
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        # Copy the real tokens over the pad template (this write was lost in
        # the mangled version, which returned an all-pad tensor's alias).
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 94 | 1 |
def hamming(n_element: int) -> list:
    """Return the first ``n_element`` Hamming numbers (2^i * 3^j * 5^k).

    Args:
        n_element: how many terms to produce; must be >= 1.

    Returns:
        The Hamming sequence starting at 1, in ascending order.

    Raises:
        ValueError: if ``n_element`` is smaller than 1.
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    # i, j, k index the smallest elements not yet multiplied by 2, 3, 5.
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        # Skip candidates that are already <= the last emitted value.
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
if __name__ == "__main__":
    # Interactive demo: ask for the term count and print the series.
    n = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = hamming(int(n))
    print('-----------------------------------------------------')
    print(f"""The list with nth numbers is: {hamming_numbers}""")
    print('-----------------------------------------------------')
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    """Abstract base class for decoding constraints.

    Subclasses implement `advance`, `does_advance`, `update`, `reset`,
    `remaining` and `copy`; `__init__` runs a self-consistency check that
    repeatedly follows `advance()` until the constraint reports completion.
    """

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Sanity-check the subclass: advancing must make monotone progress."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        """Return the token id(s) that would advance this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id):
        """Return True if `token_id` makes progress on this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id):
        """Consume `token_id`; return (stepped, completed, reset)."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        """Discard any partial progress."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """Number of steps still required to fulfill the constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """Return a copy; when `stateful`, carry over current progress."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    """Constraint forcing an exact, ordered sequence of token ids to appear."""

    def __init__(self, token_ids):
        # Skip Constraint.__init__ (and its self-test) on purpose.
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        """Next token needed, or None once the phrase is complete."""
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
class DisjunctiveTrie:
    """Prefix trie over alternative token-id sequences.

    Built from `nested_token_ids` (a list of candidate sequences); used by
    `DisjunctiveConstraint` to know which tokens may come next.
    """

    def __init__(self, nested_token_ids, no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        # A sequence that is a strict prefix of another would make completion
        # ambiguous, so it is rejected by default.
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """Tokens that can legally follow the prefix `current_seq`."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        """True when `current_seq` spells out one full candidate sequence."""
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        """Number of complete sequences stored below `root`."""
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """True if some sequence is a prefix of another (leaf count mismatch)."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    """Constraint satisfied by completing ANY of several token sequences."""

    def __init__(self, nested_token_ids):
        # Skip Constraint.__init__ (and its self-test) on purpose.
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        """List of tokens that extend the current partial match, or None."""
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)

        return token_id in next_tokens

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class ConstraintListState:
    """Tracks the joint fulfillment state of a list of `Constraint` objects.

    At any time at most one constraint is "in progress"; the rest are either
    completed or pending. `add(token_id)` routes a generated token to the
    right constraint.
    """

    def __init__(self, constraints):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        """Reset to: nothing completed, nothing in progress, all pending."""
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        """Score of this state: completed constraints plus partial progress."""
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """All token ids that would advance some constraint, or None."""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        """Re-derive the whole state from an already-generated prefix."""
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        """Feed one generated token; return (complete, stepped) flags."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".

                # But that doesn't mean we self.init_state(), since we only reset the state for this particular
                # constraint, not the full list of constraints.

                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                # inprogress to None. If there are no pending constraints either, then this full list of constraints
                # is complete.

                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?

            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".

                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.

                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never though self.constraints objects
        # throughout this process. So it's at initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
# Delimiter inserted between sentences before character-level comparison.
SENTENCE_DELIMITER = ""


if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        """jiwer < 2.3.0 shim: flatten a list of sentences into characters."""

        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                # Separate sentences with the delimiter, but not after the last one.
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
__snake_case = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__snake_case = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
__snake_case = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    """Character Error Rate metric (jiwer-backed).

    Renamed from the mangled `lowercase`: `datasets.load_metric("cer")`
    resolves the metric class by the camel-cased module name.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''', id='''sequence'''),
                    '''references''': datasets.Value('''string''', id='''sequence'''),
                }
            ),
            codebase_urls=['''https://github.com/jitsi/jiwer/'''],
            reference_urls=[
                '''https://en.wikipedia.org/wiki/Word_error_rate''',
                '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        """Return CER = (S + D + I) / (S + D + C) over all pairs."""
        if concatenate_texts:
            # jiwer's "wer" over character-level transforms IS the CER here.
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            # jiwer expects (truth, hypothesis) -- reference first.
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the CLIP model family: maps each submodule to the
# public names it exports.  NOTE(review): the obfuscated original reassigned a
# single variable for every branch and then passed an undefined
# `_import_structure` to `_LazyModule`; restored the canonical
# dict-plus-conditional-updates pattern.
_import_structure = {
    '''configuration_clip''': [
        '''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''CLIPConfig''',
        '''CLIPOnnxConfig''',
        '''CLIPTextConfig''',
        '''CLIPVisionConfig''',
    ],
    '''processing_clip''': ['''CLIPProcessor'''],
    '''tokenization_clip''': ['''CLIPTokenizer'''],
}

# Each optional backend contributes its exports only when it is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ['''CLIPTokenizerFast''']

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ['''CLIPFeatureExtractor''']
    _import_structure["image_processing_clip"] = ['''CLIPImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        '''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''CLIPModel''',
        '''CLIPPreTrainedModel''',
        '''CLIPTextModel''',
        '''CLIPTextModelWithProjection''',
        '''CLIPVisionModel''',
        '''CLIPVisionModelWithProjection''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        '''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFCLIPModel''',
        '''TFCLIPPreTrainedModel''',
        '''TFCLIPTextModel''',
        '''TFCLIPVisionModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        '''FlaxCLIPModel''',
        '''FlaxCLIPPreTrainedModel''',
        '''FlaxCLIPTextModel''',
        '''FlaxCLIPTextPreTrainedModel''',
        '''FlaxCLIPVisionModel''',
        '''FlaxCLIPVisionPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers (and `python -m`) see the eager imports.
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
class _A :
def __init__( self , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = val
_UpperCAmelCase = None
_UpperCAmelCase = None
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
if self.val:
if val < self.val:
if self.left is None:
_UpperCAmelCase = Node(_SCREAMING_SNAKE_CASE )
else:
self.left.insert(_SCREAMING_SNAKE_CASE )
elif val > self.val:
if self.right is None:
_UpperCAmelCase = Node(_SCREAMING_SNAKE_CASE )
else:
self.right.insert(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = val
def inorder(root, res):
    """Append the values of the tree rooted at `root` to `res` in sorted (in-order) order.

    NOTE(review): the obfuscated original duplicated the parameter name (a
    SyntaxError) and its def name did not match the recursive calls/`tree_sort`
    call site; restored `inorder(root, res)`.
    """
    # Recursive traversal: left subtree, node, right subtree.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)
def tree_sort(arr):
    """Sort `arr` by inserting every element into a BST and reading it back in order.

    Returns the input unchanged when it is empty.  NOTE(review): the obfuscated
    original referenced an undefined `Node` (the node class in this file is
    `_A`) and never bound `root`/`res`; restored the names the `__main__`
    guard and recursive helpers use.
    """
    # Build BST
    if len(arr) == 0:
        return arr
    root = _A(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
from typing import Dict
from .base import GenericTensor, Pipeline
class _A(Pipeline):
    """Feature-extraction pipeline: returns the base model's first output
    tensor (logits or last_hidden_state) for the given text.

    NOTE(review): the obfuscated original inherited from an undefined
    `__lowercase` (the file imports `Pipeline`) and gave all four hooks the
    same name; restored the `Pipeline` contract names.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        """Split user kwargs into preprocess / forward / postprocess params."""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs):
        """Tokenize `inputs` into framework-native tensors."""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model on the tokenized inputs."""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        """Delegate to `Pipeline.__call__` (kept for docstring injection upstream)."""
        return super().__call__(*args, **kwargs)
from __future__ import annotations
# NOTE(review): every constant below was bound to the same obfuscated name;
# restored the identifiers the functions in this file actually reference
# (`RotorPositionT`, `RotorSelectionT`, `abc`, `reflector`, `rotorN`).
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
rotor2 = 'FOBHMDKEXQNRAULPGSJVTYICZW'
rotor3 = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector: an involution on the alphabet (A<->N, B<->O, ...) --------------
reflector = {
    'A': 'N',
    'N': 'A',
    'B': 'O',
    'O': 'B',
    'C': 'P',
    'P': 'C',
    'D': 'Q',
    'Q': 'D',
    'E': 'R',
    'R': 'E',
    'F': 'S',
    'S': 'F',
    'G': 'T',
    'T': 'G',
    'H': 'U',
    'U': 'H',
    'I': 'V',
    'V': 'I',
    'J': 'W',
    'W': 'J',
    'K': 'X',
    'X': 'K',
    'L': 'Y',
    'Y': 'L',
    'M': 'Z',
    'Z': 'M',
}

# -------------------------- extra rotors --------------------------
rotor4 = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
rotor5 = 'SGLCPQWZHKXAREONTFBVIYJUDM'
rotor6 = 'HVSICLTYKQUBXDWAJZOMFGPREN'
rotor7 = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
rotor8 = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
rotor9 = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Validate enigma settings and return them with the parsed plugboard dict.

    Raises Exception for non-unique rotors, ValueError for rotor positions
    outside 1..26.  NOTE(review): the obfuscated original collapsed the three
    rotor positions into one variable so only the first was ever checked.
    """
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f'Please use 3 unique rotors (not {unique_rotsel})'
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f'First rotor position is not within range of 1..26 ({rotorpos1})'
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f'Second rotor position is not within range of 1..26 ({rotorpos2})'
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f'Third rotor position is not within range of 1..26 ({rotorpos3})'
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    """Parse a plugboard string like "AB CD" into a symmetric letter mapping.

    Each consecutive pair of letters is wired both ways; an empty string means
    no plugboard.  Raises TypeError for non-strings and Exception for odd
    length, unknown symbols, or duplicates.
    """
    if not isinstance(pbstring, str):
        msg = f'Plugboard setting isn\'t type string ({type(pbstring)})'
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f'Odd number of symbols ({len(pbstring)})'
        raise Exception(msg)
    elif pbstring == "":
        return {}

    # Fix: the obfuscated original discarded the result of replace(), so a
    # space would later be rejected as "not in list of symbols".
    pbstring = pbstring.replace(' ', '')

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f'\'{i}\' not in list of symbols'
            raise Exception(msg)
        elif i in tmppbl:
            msg = f'Duplicate symbol ({i})'
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary (both directions of every pair)
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encipher (or, run again, decipher) `text` with an Enigma-style machine.

    Non-alphabet characters pass through unchanged; input is upper-cased.
    NOTE(review): the obfuscated original collapsed the three rotor-position
    counters into one variable and lost the rotor-selection unpack; restored
    three independent counters with cascaded (odometer-style) stepping.
    """
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotora, rotorb, rotorc = rotor_selection
    # positions are given 1-based; work 0-based internally
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotora[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotorb[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotorc[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors (reverse pass)
            symbol = abc[rotorc.index(symbol) - rotorpos3]
            symbol = abc[rotorb.index(symbol) - rotorpos2]
            symbol = abc[rotora.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions (carry into the next rotor on wrap)
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # non-alphabet symbols are appended unchanged
        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    message = 'This is my Python script that emulates the Enigma machine from WWII.'
    rotor_pos = (1, 1, 1)
    pb = 'pictures'
    # NOTE(review): the rotor selection was obfuscated away; this triple is
    # taken from the upstream demo — TODO confirm.
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print('''Encrypted message:''', en)
    # Running the machine again with identical settings decrypts.
    print('''Decrypted message:''', enigma(en, rotor_pos, rotor_sel, pb))
def is_balanced(s):
    """Return True if every bracket in `s` is matched and properly nested.

    Non-bracket characters are ignored.  NOTE(review): the obfuscated def name
    did not match the `is_balanced` call in `main`; restored it.
    """
    stack = []
    open_brackets = set({"""(""", """[""", """{"""})
    closed_brackets = set({""")""", """]""", """}"""})
    open_to_closed = {"""{""": """}""", """[""": """]""", """(""": """)"""}

    for ch in s:
        if ch in open_brackets:
            stack.append(ch)
        elif ch in closed_brackets and (
            # a closer with no opener, or one that doesn't match the last opener
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != ch)
        ):
            return False

    # balanced iff every opener was consumed
    return len(stack) == 0
def main():
    """Read a bracket sequence from stdin and report whether it is balanced."""
    s = input("""Enter sequence of brackets: """)
    if is_balanced(s):
        print(s, """is balanced""")
    else:
        print(s, """is not balanced""")


if __name__ == "__main__":
    main()
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    """Minimal KwargsHandler used to exercise `to_kwargs()` below.

    Field names and defaults are pinned by the assertions in the test class
    (`MockClass(a=2, b=True, c=2.25)`); the obfuscated original inherited from
    an undefined name and lost the field names.
    """

    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    """Tests for accelerate's KwargsHandler plumbing.

    NOTE(review): the obfuscated original reused the dataclass's class name and
    non-`test_*` method names (so unittest never ran them); renamed both.
    """

    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {'a': 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {'a': 2, 'b': True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {'a': 2, 'c': 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='fp16', kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        # Launch this file under torchrun so the __main__ block below runs on
        # every process and validates the DDP kwargs.
        cmd = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    # Executed under torchrun by test_ddp_kwargs above: prepare a DDP model and
    # verify the DistributedDataParallelKwargs were forwarded.
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_024 * 1_024)
    if observed_bucket_cap_map != 15:
        error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Return per-process turn-around times under Highest Response Ratio Next.

    Processes are sorted by arrival time; at each step the ready process with
    the highest response ratio (W + B) / B runs to completion.
    NOTE(review): the def name is restored to match the call site below; the
    obfuscated original also collapsed every state variable into one name.
    Mutates `arrival_time` (sorted in place), as the original did.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Advance the clock to the next unfinished arrival if the CPU is idle.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time
def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Return per-process waiting times: turn-around time minus burst time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    # Small worked example: five processes arriving one tick apart.
    no_of_process = 5
    process_name = ["""A""", """B""", """C""", """D""", """E"""]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
    for i in range(0, no_of_process):
        print(
            F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
            F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
        )
    print(F'''average waiting time : {mean(waiting_time):.5f}''')
    print(F'''average turn around time : {mean(turn_around_time):.5f}''')
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    """Builds tiny ConvBERT configs/inputs for the model tests below.

    NOTE(review): class/method/attribute names are reconstructed from their
    call sites (`TFConvBertModelTester(self)`, `create_and_check_*`,
    `prepare_config_and_inputs*`); the obfuscated original bound every value
    to a throwaway local instead of `self` and gave all methods one name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        # The original deliberately hard-codes these instead of using the args.
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        # ConvBERT-specific sizes — attribute names assumed from upstream
        # (values 128, 2, 9, 1 are from the obfuscated original); TODO confirm.
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create a small config plus random ids/masks/labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the base model with dict and list inputs; check hidden-state shape."""
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check MLM head logits shape."""
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check sequence-classification logits shape."""
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check multiple-choice logits shape (inputs tiled across choices)."""
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check token-classification logits shape."""
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check QA start/end logits shapes."""
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by TFModelTesterMixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline tests for the TF ConvBERT models.

    NOTE(review): base classes, attribute names and `test_*` method names are
    reconstructed — the obfuscated original inherited from an undefined `a`,
    reused one name for every class attribute, and used non-`test_*` method
    names that unittest would never discover.
    """

    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # Flag names assumed from upstream (three boolean switches in the
    # original); TODO confirm.
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        # Save each model as a TF SavedModel with hidden states/attentions on,
        # reload it, and check the reloaded outputs keep the expected shapes.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    """Slow integration check against the released YituTech/conv-bert-base weights."""

    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        # Reference values recorded from the released checkpoint.
        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4)
| 500 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
# Filename under which a scheduler's configuration is serialized.
_snake_case = "scheduler_config.json"
class UpperCAmelCase_ ( a):
    """Enumeration-style container of scheduler identifiers (values 1-14).

    NOTE(review): every member below is assigned to the same name
    ``lowerCamelCase__``, so at runtime only the final assignment (14)
    survives — the members presumably carried distinct names originally;
    confirm against upstream before relying on this class.
    """

    lowerCamelCase__ = 1
    lowerCamelCase__ = 2
    lowerCamelCase__ = 3
    lowerCamelCase__ = 4
    lowerCamelCase__ = 5
    lowerCamelCase__ = 6
    lowerCamelCase__ = 7
    lowerCamelCase__ = 8
    lowerCamelCase__ = 9
    lowerCamelCase__ = 10
    lowerCamelCase__ = 11
    lowerCamelCase__ = 12
    lowerCamelCase__ = 13
    lowerCamelCase__ = 14
@dataclass
class UpperCAmelCase_ ( a):
    """Scheduler output container (subclass of BaseOutput via ``a``).

    NOTE(review): ``42`` is presumably a mangled type annotation
    (e.g. ``prev_sample: torch.FloatTensor``) — confirm upstream.
    """

    lowerCamelCase__ = 42
class UpperCAmelCase_ :
    """Scheduler mixin: config-driven save/load plus compatible-class discovery.

    NOTE(review): all four methods below share the name ``snake_case__``, so only
    the last definition is bound at runtime; they were presumably meant to be
    ``from_pretrained`` / ``save_pretrained`` / ``compatibles`` /
    ``_get_compatibles``. The duplicated ``__a`` parameter names are a
    SyntaxError as written — left byte-identical; confirm against upstream.
    """

    lowerCamelCase__ = SCHEDULER_CONFIG_NAME  # config filename
    lowerCamelCase__ = []  # NOTE(review): same attribute name as above — later assignment wins
    lowerCamelCase__ = True
    @classmethod
    def snake_case__ ( cls, __a = None, __a = None, __a=False, **__a, ):
        """Load the scheduler config and instantiate the scheduler from it."""
        # NOTE(review): the tuple unpack binds one name three times; the return
        # presumably should forward the loaded config — verify upstream.
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = cls.load_config(
            pretrained_model_name_or_path=__a, subfolder=__a, return_unused_kwargs=__a, return_commit_hash=__a, **__a, )
        return cls.from_config(__a, return_unused_kwargs=__a, **__a)
    def snake_case__ ( self, __a, __a = False, **__a):
        """Save the scheduler configuration to a directory (optionally pushing to hub)."""
        self.save_config(save_directory=__a, push_to_hub=__a, **__a)
    @property
    def snake_case__ ( self):
        """Return the classes this scheduler is compatible with."""
        return self._get_compatibles()
    @classmethod
    def snake_case__ ( cls):
        """Resolve compatible class names to actual classes from the root package."""
        # NOTE(review): ``compatible_classes_str`` / ``compatible_classes`` are read
        # below but the assignments all target throwaway names — verify upstream.
        _lowerCAmelCase : Tuple = list(set([cls.__name__] + cls._compatibles))
        _lowerCAmelCase : Optional[int] = importlib.import_module(__name__.split(".")[0])
        _lowerCAmelCase : Union[str, Any] = [
            getattr(__a, __a) for c in compatible_classes_str if hasattr(__a, __a)
        ]
        return compatible_classes
| 500 | 1 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
# Filename under which a (flax) scheduler's configuration is serialized.
_lowercase = """scheduler_config.json"""
class lowercase_ ( A ):
    """Enumeration-style container of flax scheduler identifiers (values 1-5).

    NOTE(review): all members share the name ``__lowerCamelCase`` — only the
    last assignment (5) survives at runtime; member names were presumably
    distinct in the original source.
    """

    __lowerCamelCase = 1
    __lowerCamelCase = 2
    __lowerCamelCase = 3
    __lowerCamelCase = 4
    __lowerCamelCase = 5
@dataclass
class lowercase_ ( A ):
    """Flax scheduler output container.

    NOTE(review): ``42`` is presumably a mangled type annotation
    (e.g. ``prev_sample: jnp.ndarray``) — confirm upstream.
    """

    __lowerCamelCase = 42
class lowercase_ :
    """Flax scheduler mixin: config save/load plus compatible-class discovery.

    NOTE(review): the four methods below all share the name ``_snake_case`` —
    only the last definition is bound at runtime. The duplicated ``__A``
    parameter names are a SyntaxError as written — left byte-identical;
    confirm against upstream (``from_pretrained`` / ``save_pretrained`` /
    ``compatibles`` / ``_get_compatibles``).
    """

    __lowerCamelCase = SCHEDULER_CONFIG_NAME  # config filename
    __lowerCamelCase = ["dtype"]  # NOTE(review): same attribute name — later assignments override earlier ones
    __lowerCamelCase = []
    __lowerCamelCase = True
    @classmethod
    def _snake_case ( cls , __A = None , __A = None , __A=False , **__A , ) -> Union[str, Any]:
        """Load config, build the scheduler, then create its initial flax state if supported."""
        # NOTE(review): ``scheduler`` / ``state`` / ``return_unused_kwargs`` are read
        # below but never bound under those names — verify upstream.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict =cls.load_config(
            pretrained_model_name_or_path=__A , subfolder=__A , return_unused_kwargs=__A , **__A , )
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple =cls.from_config(__A , return_unused_kwargs=__A , **__A )
        if hasattr(__A , '''create_state''' ) and getattr(__A , '''has_state''' , __A ):
            SCREAMING_SNAKE_CASE_ : Union[str, Any] =scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state
    def _snake_case ( self , __A , __A = False , **__A ) -> Union[str, Any]:
        """Save the scheduler configuration to a directory (optionally pushing to hub)."""
        self.save_config(save_directory=__A , push_to_hub=__A , **__A )
    @property
    def _snake_case ( self ) -> Union[str, Any]:
        """Return the classes this scheduler is compatible with."""
        return self._get_compatibles()
    @classmethod
    def _snake_case ( cls ) -> Dict:
        """Resolve compatible class names to actual classes from the root package."""
        SCREAMING_SNAKE_CASE_ : Dict =list(set([cls.__name__] + cls._compatibles ) )
        SCREAMING_SNAKE_CASE_ : str =importlib.import_module(__name__.split('''.''' )[0] )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =[
            getattr(__A , __A ) for c in compatible_classes_str if hasattr(__A , __A )
        ]
        return compatible_classes
def SCREAMING_SNAKE_CASE_ ( x: jnp.ndarray , shape: Tuple[int] ) -> jnp.ndarray:
    """Broadcast ``x`` to ``shape`` by appending trailing singleton axes.

    The leading axes of ``x`` must line up with the leading axes of ``shape``
    (``len(shape) >= x.ndim``); missing trailing axes are added as size-1 and
    then broadcast.

    Fixes the original, which duplicated the parameter name (a SyntaxError)
    and referenced an unbound ``x``.
    """
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : str=0.999 , UpperCAmelCase_ : Union[str, Any]=jnp.floataa ) -> jnp.ndarray:
def alpha_bar(UpperCAmelCase_ : Dict ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE_ : List[str] =[]
for i in range(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE_ : Dict =i / num_diffusion_timesteps
SCREAMING_SNAKE_CASE_ : Any =(i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(UpperCAmelCase_ ) / alpha_bar(UpperCAmelCase_ ) , UpperCAmelCase_ ) )
return jnp.array(UpperCAmelCase_ , dtype=UpperCAmelCase_ )
@flax.struct.dataclass
class lowercase_ :
    """Common per-scheduler diffusion state: betas, alphas and their cumulative product.

    NOTE(review): the original declared three fields all named ``__lowerCamelCase``
    (= 42); the field names here are recovered from how the state is consumed
    (``state.alphas_cumprod``) and from the constructor call in ``create``.
    """

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def _snake_case ( cls , scheduler ):
        """Build the state from ``scheduler.config`` according to its beta schedule."""
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
        else:
            raise NotImplementedError(
                F'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas , axis=0 )
        return cls(
            alphas=alphas , betas=betas , alphas_cumprod=alphas_cumprod , )
def SCREAMING_SNAKE_CASE_ ( state: "CommonSchedulerState" , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    """Return (sqrt(alpha_cumprod_t), sqrt(1 - alpha_cumprod_t)) broadcast to sample shape.

    ``noise`` is unused but kept for signature parity with the callers below.
    Fixes the original, which duplicated all four parameter names (a SyntaxError).
    """
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def SCREAMING_SNAKE_CASE_ ( state: "CommonSchedulerState" , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    """Forward-diffuse ``original_samples`` with ``noise`` at ``timesteps`` (q-sample).

    Fixes the original, which duplicated all four parameter names (a SyntaxError).
    """
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def SCREAMING_SNAKE_CASE_ ( state: "CommonSchedulerState" , sample: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    """Compute the v-prediction target for ``sample``/``noise`` at ``timesteps``.

    Fixes the original, which duplicated all four parameter names (a SyntaxError);
    the body reads ``noise`` and ``sample``, fixing which parameter is which.
    """
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 431 |
def SCREAMING_SNAKE_CASE_ ( sentence: str ) -> str:
    """Return *sentence* with every word longer than 4 characters reversed.

    >>> SCREAMING_SNAKE_CASE_("Hey wollef sroirraw")
    'Hey fellow warriors'

    Fixes two defects: the length test used the whole input string instead of
    the current word, and the split was taken from an undefined name while the
    parameter carried a different one.
    """
    return " ".join(
        """""".join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the original called an undefined name ``reverse_long_words``;
    # the function defined above is ``SCREAMING_SNAKE_CASE_``.
    print(SCREAMING_SNAKE_CASE_("""Hey wollef sroirraw"""))
| 431 | 1 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
# Module-level logger used by the extractor classes below.
_UpperCamelCase = get_logger(__name__)
class __lowercase :
    """Manages extraction of archives into hashed paths under the extraction cache dir.

    NOTE(review): the original bound every value to throwaway names while the
    methods read ``self.extract_dir`` / ``self.extractor`` and called
    ``self._get_output_path`` / ``self._do_extract``; the attribute writes are
    restored here. The three methods below share one obfuscated name, so only
    the last is bound at runtime — confirm method names against upstream.
    """

    def __init__( self , cache_dir = None ) ->Optional[Any]:
        # Root directory all extracted archives live under.
        self.extract_dir = (
            os.path.join(cache_dir , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def UpperCamelCase__ ( self , url_or_filename ) ->str:
        """Return the cache path the extraction of ``url_or_filename`` goes to."""
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(url_or_filename )
        return os.path.join(self.extract_dir , hash_url_to_filename(abs_path ) )

    def UpperCamelCase__ ( self , output_path , force_extract ) ->bool:
        """True when extraction must (re)run: forced, or output missing/empty."""
        return force_extract or (
            not os.path.isfile(output_path ) and not (os.path.isdir(output_path ) and os.listdir(output_path ))
        )

    def UpperCamelCase__ ( self , input_path , force_extract = False ) ->str:
        """Extract ``input_path`` if it is a recognized archive; return output (or input) path."""
        extractor_format = self.extractor.infer_extractor_format(input_path )
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path )
        if self._do_extract(output_path , force_extract ):
            self.extractor.extract(input_path , output_path , extractor_format )
        return output_path
class __lowercase (_UpperCAmelCase ):
    """Abstract interface every archive extractor implements.

    Fixes the original, which duplicated the parameter names in both abstract
    signatures (a SyntaxError).
    """

    @classmethod
    @abstractmethod
    def UpperCamelCase__ ( cls , path , **kwargs ) ->bool:
        """Return True if ``path`` can be extracted by this extractor."""
        ...

    @staticmethod
    @abstractmethod
    def UpperCamelCase__ ( input_path , output_path ) ->None:
        """Extract ``input_path`` into ``output_path``."""
        ...
class __lowercase (_UpperCAmelCase ):
    """Extractor base whose format is recognized by a magic-number prefix.

    NOTE(review): the original listed the same base class twice (a TypeError at
    class creation), duplicated parameter names (a SyntaxError), and stored the
    prefixes under ``_UpperCamelCase`` while the code reads ``cls.magic_numbers``
    — all reconciled here. ``cls.read_magic_number`` matches the upstream method
    name; in this file both methods share one obfuscated name — confirm upstream.
    """

    # Subclasses override with the byte prefixes identifying their format.
    magic_numbers = []

    @staticmethod
    def UpperCamelCase__ ( path , magic_number_length ) ->List[str]:
        """Read the first ``magic_number_length`` bytes of ``path``."""
        with open(path , '''rb''' ) as f:
            return f.read(magic_number_length )

    @classmethod
    def UpperCamelCase__ ( cls , path , magic_number = b"" ) ->bool:
        """Return True if ``path`` (or the supplied ``magic_number``) matches this format."""
        if not magic_number:
            # Longest prefix any of our magic numbers needs.
            magic_number_length = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
            try:
                magic_number = cls.read_magic_number(path , magic_number_length )
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
class __lowercase (_UpperCAmelCase ):
    """Tar archive extractor with path-traversal hardening.

    Fixes the original's duplicated parameter names (SyntaxErrors), the unbound
    locals inside ``safemembers``, and ``exist_ok`` being passed a path instead
    of ``True``.
    """

    @classmethod
    def UpperCamelCase__ ( cls , path , **kwargs ) ->bool:
        """Return True if ``path`` is a tar archive."""
        return tarfile.is_tarfile(path )

    @staticmethod
    def UpperCamelCase__ ( members , output_path ) ->List[str]:
        """Yield only tar members that are safe to extract under ``output_path``.

        Blocks absolute/upward paths as well as symlinks and hard links whose
        targets escape the extraction directory.
        """

        def resolved(path ) -> str:
            return os.path.realpath(os.path.abspath(path ) )

        def badpath(path , base ) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base , path ) ).startswith(base )

        def badlink(info , base ) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base , os.path.dirname(info.name ) ) )
            return badpath(info.linkname , base=tip )

        base = resolved(output_path )
        for finfo in members:
            if badpath(finfo.name , base ):
                logger.error(f"""Extraction of {finfo.name} is blocked (illegal path)""" )
            elif finfo.issym() and badlink(finfo , base ):
                logger.error(f"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
            elif finfo.islnk() and badlink(finfo , base ):
                logger.error(f"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
            else:
                yield finfo

    @staticmethod
    def UpperCamelCase__ ( input_path , output_path ) ->None:
        """Extract the tar at ``input_path`` into ``output_path`` through safemembers."""
        os.makedirs(output_path , exist_ok=True )
        tar_file = tarfile.open(input_path )
        # NOTE(review): ``TarExtractor.safemembers`` matches the upstream names; in
        # this obfuscated copy the class/method carry other names — confirm upstream.
        tar_file.extractall(output_path , members=TarExtractor.safemembers(tar_file , output_path ) )
        tar_file.close()
class __lowercase (_UpperCAmelCase ):
    """gzip stream extractor."""

    # gzip magic bytes.
    magic_numbers = [b"""\x1F\x8B"""]

    @staticmethod
    def UpperCamelCase__ ( input_path , output_path ) ->None:
        """Decompress the gzip stream at ``input_path`` into ``output_path``.

        The original duplicated the parameter name (a SyntaxError) and passed the
        *paths* to ``shutil.copyfileobj`` — the open file objects must be copied.
        """
        with gzip.open(input_path , '''rb''' ) as gzip_file:
            with open(output_path , '''wb''' ) as extracted_file:
                shutil.copyfileobj(gzip_file , extracted_file )
class __lowercase (_UpperCAmelCase ):
    """ZIP extractor with a stricter ``is_extractable`` than ``zipfile.is_zipfile``.

    Fixes the original's duplicated parameter names (SyntaxErrors) and the
    unbound locals in the central-directory probe.
    """

    # Magic bytes for regular, empty and spanned zip archives.
    magic_numbers = [
        b"""PK\x03\x04""",
        b"""PK\x05\x06""", # empty archive
        b"""PK\x07\x08""", # spanned archive
    ]

    @classmethod
    def UpperCamelCase__ ( cls , path , magic_number = b"" ) ->bool:
        """Return True if ``path`` is a zip archive (magic number or central directory)."""
        if super().is_extractable(path , magic_number=magic_number ):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path , '''rb''' ) as fp:
                endrec = _EndRecData(fp )
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir ) # CD is where we expect it to be
                            if len(data ) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir , data ) # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True # First central directory entry has correct magic number
            return False
        except Exception: # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def UpperCamelCase__ ( input_path , output_path ) ->None:
        """Extract every member of the archive into ``output_path``."""
        os.makedirs(output_path , exist_ok=True )
        with zipfile.ZipFile(input_path , '''r''' ) as zip_file:
            zip_file.extractall(output_path )
            zip_file.close()
class __lowercase (_UpperCAmelCase ):
    """xz (LZMA) stream extractor."""

    # xz magic bytes.
    magic_numbers = [b"""\xFD\x37\x7A\x58\x5A\x00"""]

    @staticmethod
    def UpperCamelCase__ ( input_path , output_path ) ->None:
        """Decompress the xz stream at ``input_path`` into ``output_path``.

        The original duplicated the parameter name (a SyntaxError) and passed the
        *paths* to ``shutil.copyfileobj`` instead of the open file objects.
        """
        with lzma.open(input_path ) as compressed_file:
            with open(output_path , '''wb''' ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class __lowercase (_UpperCAmelCase ):
    """RAR archive extractor (requires the optional ``rarfile`` package)."""

    magic_numbers = [b"""Rar!\x1a\x07\x00""", b"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID

    @staticmethod
    def UpperCamelCase__ ( input_path , output_path ) ->None:
        """Extract the RAR archive at ``input_path`` into ``output_path``.

        The original duplicated the parameter name (a SyntaxError) and passed the
        same value to both ``RarFile`` and ``extractall``.
        """
        if not config.RARFILE_AVAILABLE:
            raise ImportError('''Please pip install rarfile''' )
        import rarfile

        os.makedirs(output_path , exist_ok=True )
        rf = rarfile.RarFile(input_path )
        rf.extractall(output_path )
        rf.close()
class __lowercase (_UpperCAmelCase ):
    """Zstandard stream extractor (requires the optional ``zstandard`` package)."""

    # zstd magic bytes.
    magic_numbers = [b"""\x28\xb5\x2F\xFD"""]

    @staticmethod
    def UpperCamelCase__ ( input_path , output_path ) ->None:
        """Decompress the zstd stream at ``input_path`` into ``output_path``.

        The original duplicated the parameter name (a SyntaxError) and streamed
        the *paths* through ``copy_stream`` instead of the open file handles.
        """
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError('''Please pip install zstandard''' )
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path , '''rb''' ) as ifh, open(output_path , '''wb''' ) as ofh:
            dctx.copy_stream(ifh , ofh )
class __lowercase (_UpperCAmelCase ):
    """bzip2 stream extractor.

    NOTE(review): ``bza`` is presumably a mangled ``bz2`` (see the top-of-file
    import) — confirm upstream before relying on it at runtime.
    """

    # bzip2 magic bytes.
    magic_numbers = [b"""\x42\x5A\x68"""]

    @staticmethod
    def UpperCamelCase__ ( input_path , output_path ) ->None:
        """Decompress the bzip2 stream at ``input_path`` into ``output_path``.

        The original duplicated the parameter name (a SyntaxError) and passed
        the *paths* to ``shutil.copyfileobj`` instead of the open file objects.
        """
        with bza.open(input_path , '''rb''' ) as compressed_file:
            with open(output_path , '''wb''' ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class __lowercase (_UpperCAmelCase ):
    """7-Zip archive extractor (requires the optional ``py7zr`` package).

    NOTE(review): ``pyazr`` is presumably a mangled ``py7zr`` — confirm upstream.
    """

    # 7z magic bytes.
    magic_numbers = [b"""\x37\x7A\xBC\xAF\x27\x1C"""]

    @staticmethod
    def UpperCamelCase__ ( input_path , output_path ) ->None:
        """Extract the 7z archive at ``input_path`` into ``output_path``.

        The original duplicated the parameter name (a SyntaxError).
        """
        if not config.PY7ZR_AVAILABLE:
            raise ImportError('''Please pip install py7zr''' )
        import pyazr

        os.makedirs(output_path , exist_ok=True )
        with pyazr.SevenZipFile(input_path , '''r''' ) as archive:
            archive.extractall(output_path )
class __lowercase (_UpperCAmelCase ):
    """LZ4 frame extractor (requires the optional ``lz4`` package).

    NOTE(review): ``lza`` is presumably a mangled ``lz4`` — confirm upstream.
    """

    # LZ4 frame magic bytes.
    magic_numbers = [b"""\x04\x22\x4D\x18"""]

    @staticmethod
    def UpperCamelCase__ ( input_path , output_path ) ->None:
        """Decompress the LZ4 frame at ``input_path`` into ``output_path``.

        The original duplicated the parameter name (a SyntaxError) and passed
        the *paths* to ``shutil.copyfileobj`` instead of the open file objects.
        """
        if not config.LZ4_AVAILABLE:
            raise ImportError('''Please pip install lz4''' )
        import lza.frame

        with lza.frame.open(input_path , '''rb''' ) as compressed_file:
            with open(output_path , '''wb''' ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class __lowercase :
    """Registry/facade dispatching extraction to the per-format extractor classes.

    NOTE(review): the registry attribute was named ``_UpperCamelCase`` while every
    method reads ``cls.extractors`` — reconciled here. The extractor class names
    below and ``MagicNumberBaseExtractor``/``BaseExtractor`` follow the upstream
    file; in this obfuscated copy the classes carry other names — confirm
    resolution upstream. Duplicated parameter names (SyntaxErrors) are fixed.
    """

    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": BzipaExtractor,
        "7z": SevenZipExtractor, # <Added version="2.4.0"/>
        "lz4": LzaExtractor, # <Added version="2.4.0"/>
    }

    @classmethod
    def UpperCamelCase__ ( cls ) ->Dict:
        """Longest magic-number prefix among all registered extractors."""
        return max(
            len(extractor_magic_number )
            for extractor in cls.extractors.values()
            if issubclass(extractor , MagicNumberBaseExtractor )
            for extractor_magic_number in extractor.magic_numbers )

    @staticmethod
    def UpperCamelCase__ ( path , magic_number_length ) ->Optional[Any]:
        """Read ``magic_number_length`` bytes from ``path``; empty bytes on I/O error."""
        try:
            return MagicNumberBaseExtractor.read_magic_number(path , magic_number_length=magic_number_length )
        except OSError:
            return b""

    @classmethod
    def UpperCamelCase__ ( cls , path , return_extractor = False ) ->bool:
        """Deprecated: prefer ``infer_extractor_format``."""
        warnings.warn(
            '''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
            '''Use \'infer_extractor_format\' instead.''' , category=FutureWarning , )
        extractor_format = cls.infer_extractor_format(path )
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def UpperCamelCase__ ( cls , path ) ->str: # <Added version="2.4.0"/>
        """Return the registry key of the first extractor whose magic number matches."""
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path , magic_number_max_length )
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path , magic_number=magic_number ):
                return extractor_format

    @classmethod
    def UpperCamelCase__ ( cls , input_path , output_path , extractor_format = None , extractor = "deprecated" , ) ->None:
        """Extract ``input_path`` into ``output_path`` (serialized by a file lock)."""
        os.makedirs(os.path.dirname(output_path ) , exist_ok=True )
        # Prevent parallel extractions
        lock_path = str(Path(output_path ).with_suffix('''.lock''' ) )
        with FileLock(lock_path ):
            shutil.rmtree(output_path , ignore_errors=True )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format , BaseExtractor ): # passed as positional arg
                    warnings.warn(
                        '''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
                        '''Use \'extractor_format\' instead.''' , category=FutureWarning , )
                    extractor = extractor if extractor != '''deprecated''' else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path , output_path )
            else:
                warnings.warn(
                    '''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
                    '''exception in 3.0.0.''' , category=FutureWarning , )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path ):
                        return extractor.extract(input_path , output_path )
| 492 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazily-populated import structure: submodule name -> exported names.
# NOTE(review): the original assigned every list to the same ``_UpperCamelCase``
# name (clobbering the dict) and then referenced an undefined
# ``_import_structure`` at the bottom; the standard transformers lazy-module
# pattern is restored here.
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 492 | 1 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class lowercase ( unittest.TestCase ):
    """Sanity checks for ``get_activation``: returned class plus a few reference values.

    NOTE(review): the original assigned each activation to a throwaway name and
    asserted on undefined ``_snake_case``/``act``; it also used the nonexistent
    ``torch.floataa`` (presumably ``torch.float32``) — both restored here. All
    four methods share the name ``_snake_case`` as in the original, so only the
    last is bound at runtime; confirm method names upstream.
    """

    def _snake_case ( self):
        act = get_activation('swish')
        self.assertIsInstance(act , nn.SiLU)

        # SiLU(-100) underflows to exactly 0 in float32; SiLU(20) ~= 20.
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32)).item() , 0)
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32)).item() , 20)

    def _snake_case ( self):
        act = get_activation('silu')
        self.assertIsInstance(act , nn.SiLU)

        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32)).item() , 0)
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32)).item() , 20)

    def _snake_case ( self):
        act = get_activation('mish')
        self.assertIsInstance(act , nn.Mish)

        self.assertEqual(act(torch.tensor(-200 , dtype=torch.float32)).item() , 0)
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32)).item() , 20)

    def _snake_case ( self):
        act = get_activation('gelu')
        self.assertIsInstance(act , nn.GELU)

        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32)).item() , 0)
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32)).item() , 20)
| 717 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    # NOTE(review): the original read ``args.txtaimg_unclip``, but argparse stores
    # ``--txt2img_unclip`` under the dest ``txt2img_unclip`` — fixed. Variable
    # names are restored from how they are referenced below.
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    # Assemble the image-variation pipeline from the txt2img components plus the
    # CLIP image encoder.
    imgaimg = UnCLIPImageVariationPipeline(
        decoder=txtaimg.decoder,
        text_encoder=txtaimg.text_encoder,
        tokenizer=txtaimg.tokenizer,
        text_proj=txtaimg.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txtaimg.super_res_first,
        super_res_last=txtaimg.super_res_last,
        decoder_scheduler=txtaimg.decoder_scheduler,
        super_res_scheduler=txtaimg.super_res_scheduler,
    )

    imgaimg.save_pretrained(args.dump_path)
| 471 | 0 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class SCREAMING_SNAKE_CASE__ ( _a ):
    """Value-guided planning pipeline: denoises trajectories with a diffusion UNet
    while nudging them up the gradient of a learned value function.

    NOTE(review): the original duplicated parameter names (SyntaxErrors), bound
    every value to throwaway names while the code read ``self.value_function`` /
    ``x`` / ``grad`` etc., and its methods were all defined under one colliding
    name while being *called* as ``normalize`` / ``de_normalize`` / ``to_torch``
    / ``reset_xa`` / ``run_diffusion`` — names restored from those call sites.
    """

    def __init__( self , value_function: UNetaDModel , unet: UNetaDModel , scheduler: DDPMScheduler , env , ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        # Per-key normalization statistics; non-numeric dataset entries are skipped.
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except: # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except: # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize( self , x_in , key ):
        """Standardize ``x_in`` using the dataset statistics stored for ``key``."""
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize( self , x_in , key ):
        """Invert :meth:`normalize`."""
        return x_in * self.stds[key] + self.means[key]

    def to_torch( self , x_in ):
        """Recursively move ``x_in`` (dict / tensor / array-like) onto the unet's device."""
        if type(x_in ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
        elif torch.is_tensor(x_in ):
            return x_in.to(self.unet.device )
        return torch.tensor(x_in , device=self.unet.device )

    def reset_xa( self , x_in , cond , act_dim ):
        """Overwrite the state slice of the trajectory at each conditioned timestep."""
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion( self , x , conditions , n_guide_steps , scale ):
        """Denoise ``x`` while ascending the value-function gradient each step.

        Returns the final trajectories and the last value estimates ``y``.
        """
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,) , i , device=self.unet.device , dtype=torch.long )
            for _ in range(n_guide_steps ):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0 , 2 , 1 ) , timesteps ).sample
                    grad = torch.autograd.grad([y.sum()] , [x] )[0]

                    posterior_variance = self.scheduler._get_variance(i )
                    model_std = torch.exp(0.5 * posterior_variance )
                    grad = model_std * grad
                # NOTE(review): upstream zeroes guidance for the last steps via this
                # subscripted assignment; the obfuscated copy had lost the target.
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_xa(x , conditions , self.action_dim )
            prev_x = self.unet(x.permute(0 , 2 , 1 ) , timesteps ).sample.permute(0 , 2 , 1 )

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x , i , x , predict_epsilon=False )["""prev_sample"""]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_xa(x , conditions , self.action_dim )
            x = self.to_torch(x )
        return x, y

    def __call__( self , obs , batch_size=64 , planning_horizon=32 , n_guide_steps=2 , scale=0.1 ):
        """Plan from observation ``obs``; return the first action of the best trajectory."""
        # normalize the observations and create batch dimension
        obs = self.normalize(obs , """observations""" )
        obs = obs[None].repeat(batch_size , axis=0 )

        conditions = {0: self.to_torch(obs )}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape , device=self.unet.device )
        x = self.reset_xa(x1 , conditions , self.action_dim )
        x = self.to_torch(x )

        # run the diffusion process
        x , y = self.run_diffusion(x , conditions , n_guide_steps , scale )

        # sort output trajectories by value
        sorted_idx = y.argsort(0 , descending=True ).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions , key="""actions""" )

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0 , batch_size )
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
| 169 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
# Module-level logger for the feature extractor below.
a = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _a ):
_a = ['input_values', 'attention_mask']
def __init__( self : Union[str, Any] , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 1_6000 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : bool = False , lowerCAmelCase : int = 80 , lowerCAmelCase : int = 16 , lowerCAmelCase : int = 64 , lowerCAmelCase : str = "hann_window" , lowerCAmelCase : float = 1.0 , lowerCAmelCase : float = 80 , lowerCAmelCase : float = 7600 , lowerCAmelCase : float = 1e-10 , lowerCAmelCase : int = 2 , lowerCAmelCase : bool = True , **lowerCAmelCase : Tuple , ):
super().__init__(feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase )
lowerCAmelCase = do_normalize
lowerCAmelCase = return_attention_mask
lowerCAmelCase = num_mel_bins
lowerCAmelCase = hop_length
lowerCAmelCase = win_length
lowerCAmelCase = win_function
lowerCAmelCase = frame_signal_scale
lowerCAmelCase = fmin
lowerCAmelCase = fmax
lowerCAmelCase = mel_floor
lowerCAmelCase = reduction_factor
lowerCAmelCase = win_length * sampling_rate // 1000
lowerCAmelCase = hop_length * sampling_rate // 1000
lowerCAmelCase = optimal_fft_length(self.sample_size )
lowerCAmelCase = (self.n_fft // 2) + 1
lowerCAmelCase = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowerCAmelCase )
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , )
if frame_signal_scale != 1.0:
warnings.warn(
"""The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers""" , lowerCAmelCase , )
if reduction_factor != 2.0:
warnings.warn(
"""The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers""" , lowerCAmelCase , )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def __lowercase ( lowerCAmelCase : List[np.ndarray] , lowerCAmelCase : List[np.ndarray] , lowerCAmelCase : float = 0.0 ):
        """Zero-mean / unit-variance normalize each vector; padded tail gets padding_value.

        NOTE(review): obfuscated — duplicate parameter names (SyntaxError) and the
        body reads unbound names (attention_mask, padding_value, input_values,
        normed_input_values, normed_slice). Restore from the Wav2Vec2 original.
        """
        if attention_mask is not None:
            lowerCAmelCase = np.array(lowerCAmelCase , np.intaa )
            lowerCAmelCase = []
            # attention_mask.sum(-1) gives the unpadded length of each vector
            for vector, length in zip(lowerCAmelCase , attention_mask.sum(-1 ) ):
                lowerCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    lowerCAmelCase = padding_value
                normed_input_values.append(lowerCAmelCase )
        else:
            # no mask: normalize each vector over its full length
            lowerCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values
    def __lowercase ( self : int , lowerCAmelCase : np.ndarray , ):
        """Compute a log10 mel spectrogram of one waveform; returns (frames, num_mel_bins)."""
        lowerCAmelCase = spectrogram(
            lowerCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="""log10""" , )
        # spectrogram() returns (num_mel_bins, frames); transpose to frames-first
        return log_mel_spec.T
    # NOTE(review): obfuscated — all keyword parameters share one name
    # (SyntaxError) and the body reads unbound names (audio, audio_target,
    # sampling_rate, inputs, inputs_target, decoder_attention_mask).
    def __call__( self : int , lowerCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , lowerCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : Optional[int] = None , **lowerCAmelCase : Union[str, Any] , ):
        """Featurize model inputs (`audio`) and/or decoder targets (`audio_target`)."""
        if audio is None and audio_target is None:
            raise ValueError("""You must provide either `audio` or `audio_target` values.""" )
        # Refuse audio sampled at a rate other than the one this extractor was built for.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        if audio is not None:
            lowerCAmelCase = self._process_audio(
                lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase , )
        else:
            lowerCAmelCase = None
        if audio_target is not None:
            lowerCAmelCase = self._process_audio(
                lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase , )
            # When only targets were given, return them directly; otherwise fold the
            # target features into the input batch as labels/decoder_attention_mask.
            if inputs is None:
                return inputs_target
            else:
                lowerCAmelCase = inputs_target["""input_values"""]
                lowerCAmelCase = inputs_target.get("""attention_mask""" )
                if decoder_attention_mask is not None:
                    lowerCAmelCase = decoder_attention_mask
        return inputs
    # NOTE(review): obfuscated — duplicate parameter names (SyntaxError); the body
    # reads unbound names (speech, is_target, padding, max_length, truncation,
    # pad_to_multiple_of, return_attention_mask, return_tensors, feature_size_hack).
    def __lowercase ( self : Dict , lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase : bool = False , lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[Union[str, TensorType]] = None , **lowerCAmelCase : List[Any] , ):
        """Batch, (optionally) mel-featurize, pad and normalize raw speech input."""
        # a 2-D ndarray is interpreted as a batch of mono waveforms
        lowerCAmelCase = isinstance(lowerCAmelCase , np.ndarray ) and len(speech.shape ) > 1
        if is_batched_numpy and len(speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        lowerCAmelCase = is_batched_numpy or (
            isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            lowerCAmelCase = [np.asarray(lowerCAmelCase , dtype=np.floataa ) for speech in speech]
        elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
            lowerCAmelCase = np.asarray(lowerCAmelCase , dtype=np.floataa )
        elif isinstance(lowerCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
            lowerCAmelCase = speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            lowerCAmelCase = [speech]
        # needed to make pad() work on spectrogram inputs
        lowerCAmelCase = self.feature_size
        # convert into correct format for padding
        if is_target:
            lowerCAmelCase = [self._extract_mel_features(lowerCAmelCase ) for waveform in speech]
            lowerCAmelCase = BatchFeature({"""input_values""": features} )
            lowerCAmelCase = self.num_mel_bins
        else:
            lowerCAmelCase = BatchFeature({"""input_values""": speech} )
        lowerCAmelCase = self.pad(
            lowerCAmelCase , padding=lowerCAmelCase , max_length=lowerCAmelCase , truncation=lowerCAmelCase , pad_to_multiple_of=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , )
        # restore the real feature size after the pad() hack above
        lowerCAmelCase = feature_size_hack
        # convert input values to correct format
        lowerCAmelCase = padded_inputs["""input_values"""]
        if not isinstance(input_values[0] , np.ndarray ):
            lowerCAmelCase = [np.asarray(lowerCAmelCase , dtype=np.floataa ) for array in input_values]
        elif (
            not isinstance(lowerCAmelCase , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.floataa )
        ):
            lowerCAmelCase = [array.astype(np.floataa ) for array in input_values]
        elif isinstance(lowerCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
            lowerCAmelCase = input_values.astype(np.floataa )
        # convert attention_mask to correct format
        lowerCAmelCase = padded_inputs.get("""attention_mask""" )
        if attention_mask is not None:
            lowerCAmelCase = [np.asarray(lowerCAmelCase , dtype=np.intaa ) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            # only pass the mask through when padding actually happened
            lowerCAmelCase = (
                attention_mask
                if self._get_padding_strategies(lowerCAmelCase , max_length=lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            lowerCAmelCase = self.zero_mean_unit_var_norm(
                padded_inputs["""input_values"""] , attention_mask=lowerCAmelCase , padding_value=self.padding_value )
        if return_tensors is not None:
            lowerCAmelCase = padded_inputs.convert_to_tensors(lowerCAmelCase )
        return padded_inputs
    def __lowercase ( self : Dict ):
        """Serialize config, dropping attributes derived from the other properties."""
        lowerCAmelCase = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        lowerCAmelCase = ["""window""", """mel_filters""", """sample_size""", """sample_stride""", """n_fft""", """n_freqs"""]
        # NOTE(review): obfuscated — `names`/`output` below are never bound above.
        for name in names:
            if name in output:
                del output[name]
        return output
| 169 | 1 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
# Map resampling-mode names to PIL filters; Pillow >= 9.1.0 moved the filter
# constants into the PIL.Image.Resampling enum, older versions keep them on
# PIL.Image directly.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    lowerCAmelCase__: Dict = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    lowerCAmelCase__: Any = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Optional[int]:
    """Denormalize a batch of [-1, 1] torch images (N, C, H, W) to PIL images.

    Returns a list of PIL.Image objects, one per batch element.
    """
    # NOTE(review): the original body read an unbound name `images` and called an
    # undefined `numpy_to_pil`; it now uses its parameter and performs the
    # numpy -> PIL conversion inline so the function is self-contained.
    images = (SCREAMING_SNAKE_CASE / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = (images * 255).round().astype('uint8' )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> str:
    """Convert a numpy image batch (values in [0, 1], HWC or NHWC) to PIL images.

    A 3-D array is treated as a single image and promoted to a batch of one.
    Returns a list of PIL.Image objects.
    """
    # NOTE(review): the original body read an unbound name `images`, and the
    # color branch passed the function parameter to Image.fromarray instead of
    # the per-image loop variable; both fixed to use the actual data.
    images = SCREAMING_SNAKE_CASE
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('uint8' )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
| 311 |
def __SCREAMING_SNAKE_CASE ( arr , required_sum ) -> bool:
    """Return True if some subset of non-negative ints `arr` sums to `required_sum`.

    Classic O(len(arr) * required_sum) dynamic programme over a boolean table
    subset[i][j] == "some subset of the first i elements sums to j".
    """
    # NOTE(review): the original declared the same parameter name twice (a
    # SyntaxError) and its obfuscated assignments never wrote into the DP table;
    # parameter names and table writes restored from the body's own usage.
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for j in range(1 , required_sum + 1 ):
        subset[0][j] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                # element too big for this target sum: inherit from row above
                subset[i][j] = subset[i - 1][j]
            else:
                # skip the element, or take it and solve for the remainder
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 311 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
# Module logger.
# NOTE(review): obfuscation renamed all four module constants below to the same
# name `__A`, so each assignment clobbers the previous one — only the final
# token-id list survives at import time. The originals are the logger, the
# pretrained config map, and two suppressed-token-id lists for generation.
__A : Any = logging.get_logger(__name__)
__A : Any = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
__A : int = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
    1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
    4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
    11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
    17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
    34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
__A : Dict = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
    3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
    7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
    14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
    22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
    42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class lowerCamelCase( __snake_case ):
    '''Whisper-style model configuration (obfuscated).

    NOTE(review): machine-obfuscated — the three class attributes all share the
    name `__magic_name__` (each clobbers the last), `__init__` repeats the
    keyword name `snake_case_` (a SyntaxError), and every `_A = ...` binds a
    throwaway local instead of a `self.` attribute while reading names
    (vocab_size, d_model, ...) that are never bound. Restore from upstream.
    '''
    __magic_name__ = 'whisper'
    __magic_name__ = ['past_key_values']
    __magic_name__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , snake_case_=5_1865 , snake_case_=80 , snake_case_=6 , snake_case_=4 , snake_case_=6 , snake_case_=4 , snake_case_=1536 , snake_case_=1536 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=5_0257 , snake_case_=True , snake_case_=True , snake_case_="gelu" , snake_case_=256 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.02 , snake_case_=False , snake_case_=1500 , snake_case_=448 , snake_case_=5_0256 , snake_case_=5_0256 , snake_case_=5_0256 , snake_case_=None , snake_case_=[220, 5_0256] , snake_case_=False , snake_case_=256 , snake_case_=False , snake_case_=0.05 , snake_case_=10 , snake_case_=2 , snake_case_=0.0 , snake_case_=10 , snake_case_=0 , snake_case_=7 , **snake_case_ , ):
        _A = vocab_size
        _A = num_mel_bins
        _A = d_model
        _A = encoder_layers
        _A = encoder_attention_heads
        _A = decoder_layers
        _A = decoder_attention_heads
        _A = decoder_ffn_dim
        _A = encoder_ffn_dim
        _A = dropout
        _A = attention_dropout
        _A = activation_dropout
        _A = activation_function
        _A = init_std
        _A = encoder_layerdrop
        _A = decoder_layerdrop
        _A = use_cache
        _A = encoder_layers
        _A = scale_embedding  # scale factor will be sqrt(d_model) if True
        _A = max_source_positions
        _A = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        _A = classifier_proj_size
        _A = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        _A = apply_spec_augment
        _A = mask_time_prob
        _A = mask_time_length
        _A = mask_time_min_masks
        _A = mask_feature_prob
        _A = mask_feature_length
        _A = mask_feature_min_masks
        _A = median_filter_width
        super().__init__(
            pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , decoder_start_token_id=snake_case_ , suppress_tokens=snake_case_ , begin_suppress_tokens=snake_case_ , **snake_case_ , )
class lowerCamelCase( __snake_case ):
    '''ONNX export configuration for the Whisper-style model (obfuscated).

    NOTE(review): machine-obfuscated — the second method repeats the keyword
    name `snake_case_` (a SyntaxError) and `_A = ...` binds throwaway locals
    while the bodies read never-bound names (common_inputs, preprocessor,
    encoder_inputs, decoder_inputs, dummy_inputs). Restore from upstream.
    '''
    @property
    def lowerCAmelCase__ ( self ):
        """Declare the ONNX input axes (input_features plus decoder inputs)."""
        _A = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ] )
        if self.use_past:
            _A = {0: 'batch'}
        else:
            _A = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(snake_case_ , direction='inputs' )
        return common_inputs
    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , snake_case_ = 2_2050 , snake_case_ = 5.0 , snake_case_ = 220 , ):
        """Build dummy encoder (audio) and decoder (token) inputs for ONNX tracing."""
        _A = OrderedDict()
        _A = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=snake_case_ , framework=snake_case_ , sampling_rate=snake_case_ , time_duration=snake_case_ , frequency=snake_case_ , )
        _A = encoder_inputs['input_features'].shape[2]
        # with past key values only half of the encoder sequence is fed
        _A = encoder_sequence_length // 2 if self.use_past else seq_length
        _A = super().generate_dummy_inputs(
            preprocessor.tokenizer , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        _A = encoder_inputs.pop('input_features' )
        _A = decoder_inputs.pop('decoder_input_ids' )
        if "past_key_values" in decoder_inputs:
            _A = decoder_inputs.pop('past_key_values' )
        return dummy_inputs
    @property
    def lowerCAmelCase__ ( self ):
        # absolute tolerance used when validating the exported model
        return 1E-3
| 27 |
import math
import os
import sys
# NOTE(review): this whole chunk is machine-obfuscated — six functions all named
# `lowerCamelCase__` (each definition clobbers the previous), with bodies that
# read never-bound names. Call sites name the intended functions
# (read_file_binary, add_key_to_lexicon, compress_data, add_file_length,
# write_file_binary, compress); restore from the upstream compression module.
def lowerCamelCase__ (_UpperCAmelCase):
    """Read a file and return its contents as a string of bits ('0'/'1')."""
    SCREAMING_SNAKE_CASE = ''
    try:
        with open(_UpperCAmelCase , 'rb') as binary_file:
            SCREAMING_SNAKE_CASE = binary_file.read()
        for dat in data:
            # each byte rendered as an 8-character binary string
            SCREAMING_SNAKE_CASE = F'''{dat:08b}'''
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()
# NOTE(review): obfuscated — duplicate parameter names (SyntaxError); reads
# unbound `lexicon` / `last_match_id`. Intended role: grow the LZ lexicon,
# widening all codes by one bit whenever the id count crosses a power of two.
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
    """Insert a new key into the compression lexicon."""
    lexicon.pop(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = last_match_id
    if math.loga(_UpperCAmelCase).is_integer():
        for curr_key in lexicon:
            SCREAMING_SNAKE_CASE = '0' + lexicon[curr_key]
    SCREAMING_SNAKE_CASE = bin(_UpperCAmelCase)[2:]
# NOTE(review): obfuscated — body reads unbound names (curr_string, lexicon,
# result, last_match_id, index, data_bits); locals are bound to a throwaway name.
def lowerCamelCase__ (_UpperCAmelCase):
    """LZ-compress a bit string into a string of variable-width code ids."""
    SCREAMING_SNAKE_CASE = {'0': '0', '1': '1'}
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = '', ''
    SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
    for i in range(len(_UpperCAmelCase)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        SCREAMING_SNAKE_CASE = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
        index += 1
        SCREAMING_SNAKE_CASE = ''
    # flush any trailing partial match by padding with zeros
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        SCREAMING_SNAKE_CASE = lexicon[curr_string]
        result += last_match_id
    return result
# NOTE(review): obfuscated — duplicate parameter names (SyntaxError); reads
# unbound `file_length_binary`, `length_length`, `compressed`.
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
    """Prefix the compressed payload with the source file length in binary."""
    SCREAMING_SNAKE_CASE = os.path.getsize(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = bin(_UpperCAmelCase)[2:]
    SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
    return "0" * (length_length - 1) + file_length_binary + compressed
# NOTE(review): obfuscated — duplicate parameter names (SyntaxError); reads
# unbound `to_write` / `byte_length` / `result_byte_array`.
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
    """Write a bit string to disk as bytes, with 1-then-0 end padding."""
    SCREAMING_SNAKE_CASE = 8
    try:
        with open(_UpperCAmelCase , 'wb') as opened_file:
            # split the bit string into byte-sized chunks
            SCREAMING_SNAKE_CASE = [
                to_write[i : i + byte_length]
                for i in range(0 , len(_UpperCAmelCase) , _UpperCAmelCase)
            ]
            # pad the last chunk with a '1' marker followed by zeros
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(_UpperCAmelCase , 2).to_bytes(1 , byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()
# NOTE(review): obfuscated — duplicate parameter names (SyntaxError); calls
# helper names (read_file_binary, compress_data, add_file_length,
# write_file_binary) that this file no longer defines under those names.
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
    """End-to-end compression: read source file, compress, write destination."""
    SCREAMING_SNAKE_CASE = read_file_binary(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = compress_data(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = add_file_length(_UpperCAmelCase , _UpperCAmelCase)
    write_file_binary(_UpperCAmelCase , _UpperCAmelCase)
# CLI entry point: `python <script> <source> <destination>`.
# NOTE(review): `compress` is not defined under that name in this file — the
# obfuscation renamed it to `lowerCamelCase__`; confirm against upstream.
if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 73 | 0 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 711 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ ( UpperCamelCase__ , unittest.TestCase ):
    # RoCBert tokenizer test suite (vocabulary, shape ids, pronunciation ids).
    # NOTE(review): machine-obfuscated — the five class attributes below all
    # share the name `lowerCamelCase_` (each clobbers the previous; upstream
    # they are tokenizer_class / rust_tokenizer_class / test_rust_tokenizer /
    # space_between_special_tokens / from_pretrained_filter), and method bodies
    # bind results to a throwaway `SCREAMING_SNAKE_CASE__` local while later
    # lines read the real upstream names (`__A` placeholders). Restore from the
    # upstream transformers test file before relying on these tests.
    lowerCamelCase_ = RoCBertTokenizer
    lowerCamelCase_ = None
    lowerCamelCase_ = False
    lowerCamelCase_ = True
    lowerCamelCase_ = filter_non_english
    def _snake_case ( self :List[Any] ) -> List[Any]:
        """Write a tiny vocab / word-shape / word-pronunciation fixture to disk."""
        super().setUp()
        SCREAMING_SNAKE_CASE__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
        SCREAMING_SNAKE_CASE__ = {}
        SCREAMING_SNAKE_CASE__ = {}
        for i, value in enumerate(__A ):
            SCREAMING_SNAKE_CASE__ = i
            SCREAMING_SNAKE_CASE__ = i
        SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""] )
        SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        with open(self.word_shape_file , """w""" , encoding="""utf-8""" ) as word_shape_writer:
            json.dump(__A , __A , ensure_ascii=__A )
        with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""" ) as word_pronunciation_writer:
            json.dump(__A , __A , ensure_ascii=__A )
    def _snake_case ( self :List[Any] ) -> Dict:
        """Tokenize Chinese text and check token, shape and pronunciation ids."""
        SCREAMING_SNAKE_CASE__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        SCREAMING_SNAKE_CASE__ = tokenizer.tokenize("""你好[SEP]你是谁""" )
        self.assertListEqual(__A , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__A ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__A ) , [5, 6, 2, 5, 7, 8] )
    def _snake_case ( self :List[Any] ) -> Union[str, Any]:
        """Basic tokenizer splits mixed ASCII/CJK text on character class."""
        SCREAMING_SNAKE_CASE__ = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
    def _snake_case ( self :List[str] ) -> List[str]:
        """Lower-casing basic tokenizer normalizes case and accents by default."""
        SCREAMING_SNAKE_CASE__ = RoCBertBasicTokenizer(do_lower_case=__A )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how  \n Are yoU?  """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def _snake_case ( self :str ) -> str:
        """Lower-casing with accent stripping disabled keeps the accent."""
        SCREAMING_SNAKE_CASE__ = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
    def _snake_case ( self :Any ) -> List[str]:
        """Lower-casing with accent stripping enabled removes the accent."""
        SCREAMING_SNAKE_CASE__ = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def _snake_case ( self :List[str] ) -> int:
        """Default lower-casing also strips accents."""
        SCREAMING_SNAKE_CASE__ = RoCBertBasicTokenizer(do_lower_case=__A )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def _snake_case ( self :Union[str, Any] ) -> Optional[Any]:
        """Case-preserving basic tokenizer keeps the original casing."""
        SCREAMING_SNAKE_CASE__ = RoCBertBasicTokenizer(do_lower_case=__A )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how  \n Are yoU?  """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def _snake_case ( self :int ) -> Dict:
        """Case-preserving tokenizer with accent stripping disabled."""
        SCREAMING_SNAKE_CASE__ = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def _snake_case ( self :Union[str, Any] ) -> List[Any]:
        """Case-preserving tokenizer with accent stripping enabled."""
        SCREAMING_SNAKE_CASE__ = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def _snake_case ( self :List[Any] ) -> str:
        """Tokens in never_split are emitted verbatim."""
        SCREAMING_SNAKE_CASE__ = RoCBertBasicTokenizer(do_lower_case=__A , never_split=["""[UNK]"""] )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how  \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
    def _snake_case ( self :Any ) -> List[str]:
        """WordPiece tokenizer splits into subwords and falls back to [UNK]."""
        SCREAMING_SNAKE_CASE__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
        SCREAMING_SNAKE_CASE__ = {}
        for i, token in enumerate(__A ):
            SCREAMING_SNAKE_CASE__ = i
        SCREAMING_SNAKE_CASE__ = RoCBertWordpieceTokenizer(vocab=__A , unk_token="""[UNK]""" )
        self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
        self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
    def _snake_case ( self :Any ) -> str:
        """_is_whitespace recognizes ASCII and Unicode whitespace only."""
        self.assertTrue(_is_whitespace(""" """ ) )
        self.assertTrue(_is_whitespace("""\t""" ) )
        self.assertTrue(_is_whitespace("""\r""" ) )
        self.assertTrue(_is_whitespace("""\n""" ) )
        self.assertTrue(_is_whitespace("""\u00A0""" ) )
        self.assertFalse(_is_whitespace("""A""" ) )
        self.assertFalse(_is_whitespace("""-""" ) )
    def _snake_case ( self :int ) -> str:
        """_is_control recognizes control characters only."""
        self.assertTrue(_is_control("""\u0005""" ) )
        self.assertFalse(_is_control("""A""" ) )
        self.assertFalse(_is_control(""" """ ) )
        self.assertFalse(_is_control("""\t""" ) )
        self.assertFalse(_is_control("""\r""" ) )
    def _snake_case ( self :List[str] ) -> List[str]:
        """_is_punctuation recognizes punctuation characters only."""
        self.assertTrue(_is_punctuation("""-""" ) )
        self.assertTrue(_is_punctuation("""$""" ) )
        self.assertTrue(_is_punctuation("""`""" ) )
        self.assertTrue(_is_punctuation(""".""" ) )
        self.assertFalse(_is_punctuation("""A""" ) )
        self.assertFalse(_is_punctuation(""" """ ) )
    def _snake_case ( self :str ) -> str:
        """Soft-hyphen-only input tokenizes to an empty list, not a crash."""
        SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(__A ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
        if self.test_rust_tokenizer:
            SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(__A ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
    def _snake_case ( self :int ) -> Any:
        """Offset mapping from the fast tokenizer matches the produced tokens."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(__A , **__A )
                SCREAMING_SNAKE_CASE__ = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                SCREAMING_SNAKE_CASE__ = tokenizer_r.encode_plus(
                    __A , return_attention_mask=__A , return_token_type_ids=__A , return_offsets_mapping=__A , add_special_tokens=__A , )
                SCREAMING_SNAKE_CASE__ = tokenizer_r.do_lower_case if hasattr(__A , """do_lower_case""" ) else False
                # expected (offset, token) pairs differ for cased vs. uncased models
                SCREAMING_SNAKE_CASE__ = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), """A"""),
                        ((1, 2), ""","""),
                        ((3, 5), """na"""),
                        ((5, 6), """##ï"""),
                        ((6, 8), """##ve"""),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), """Allen"""),
                        ((21, 23), """##NL"""),
                        ((23, 24), """##P"""),
                        ((25, 33), """sentence"""),
                        ((33, 34), """."""),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), """a"""),
                        ((1, 2), ""","""),
                        ((3, 8), """naive"""),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), """allen"""),
                        ((21, 23), """##nl"""),
                        ((23, 24), """##p"""),
                        ((25, 33), """sentence"""),
                        ((33, 34), """."""),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
    def _snake_case ( self :Optional[int] ) -> Union[str, Any]:
        """Chinese characters never get the '##' continuation prefix (except CWS mode)."""
        SCREAMING_SNAKE_CASE__ = ["""的""", """人""", """有"""]
        SCREAMING_SNAKE_CASE__ = """""".join(__A )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                SCREAMING_SNAKE_CASE__ = True
                SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained(__A , **__A )
                SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(__A , **__A )
                SCREAMING_SNAKE_CASE__ = tokenizer_p.encode(__A , add_special_tokens=__A )
                SCREAMING_SNAKE_CASE__ = tokenizer_r.encode(__A , add_special_tokens=__A )
                SCREAMING_SNAKE_CASE__ = tokenizer_r.convert_ids_to_tokens(__A )
                SCREAMING_SNAKE_CASE__ = tokenizer_p.convert_ids_to_tokens(__A )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(__A , __A )
                self.assertListEqual(__A , __A )
                SCREAMING_SNAKE_CASE__ = False
                SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(__A , **__A )
                SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained(__A , **__A )
                SCREAMING_SNAKE_CASE__ = tokenizer_r.encode(__A , add_special_tokens=__A )
                SCREAMING_SNAKE_CASE__ = tokenizer_p.encode(__A , add_special_tokens=__A )
                SCREAMING_SNAKE_CASE__ = tokenizer_r.convert_ids_to_tokens(__A )
                SCREAMING_SNAKE_CASE__ = tokenizer_p.convert_ids_to_tokens(__A )
                # it is expected that only the first Chinese character is not preceded by "##".
                SCREAMING_SNAKE_CASE__ = [
                    f'''##{token}''' if idx != 0 else token for idx, token in enumerate(__A )
                ]
                self.assertListEqual(__A , __A )
                self.assertListEqual(__A , __A )
    @slow
    def _snake_case ( self :Union[str, Any] ) -> List[str]:
        """build_inputs_with_special_tokens wraps sequences with [CLS]/[SEP] ids."""
        SCREAMING_SNAKE_CASE__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        SCREAMING_SNAKE_CASE__ = tokenizer.encode("""你好""" , add_special_tokens=__A )
        SCREAMING_SNAKE_CASE__ = tokenizer.encode("""你是谁""" , add_special_tokens=__A )
        SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(__A )
        SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(__A , __A )
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]
    def _snake_case ( self :List[str] ) -> List[str]:
        """prepare_for_model and encode_plus agree for the three id sequences."""
        SCREAMING_SNAKE_CASE__ = self.get_tokenizers(do_lower_case=__A )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                SCREAMING_SNAKE_CASE__ = """你好,你是谁"""
                SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__A )
                SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(__A )
                SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_shape_ids(__A )
                SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_pronunciation_ids(__A )
                SCREAMING_SNAKE_CASE__ = tokenizer.prepare_for_model(
                    __A , __A , __A , add_special_tokens=__A )
                SCREAMING_SNAKE_CASE__ = tokenizer.encode_plus(__A , add_special_tokens=__A )
                # NOTE(review): trailing " | 59 | 0 |" below is junk fused into
                # this line by whatever produced this dump (trailing `|` is a
                # SyntaxError) — the line should end at the assertEqual call.
                self.assertEqual(__A , __A ) | 59 | 0 |
"""simple docstring"""
import numpy as np
def _snake_case ( f , xa , ya , x_end , h ):
    """Integrate y' = f(x, y) from (xa, ya) up to x_end with classic RK4.

    Parameters: f is the derivative function f(x, y); xa/ya the initial point;
    x_end the final x; h the step size. Returns the numpy array of y values at
    each step, with y[0] == ya.
    """
    # NOTE(review): the original signature repeated one parameter name five
    # times (a SyntaxError) and collapsed the four RK4 slopes into one local;
    # names restored from the body's usage and the standard RK4 scheme.
    n = int(np.ceil((x_end - xa) / h ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # the four Runge-Kutta slope evaluations
        ka = f(x , y[k] )
        kb = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
        kc = f(x + 0.5 * h , y[k] + 0.5 * h * kb )
        kd = f(x + h , y[k] + h * kc )
        # weighted average: (k1 + 2*k2 + 2*k3 + k4) / 6
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 160 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A: Optional[Any] = logging.getLogger(__name__)
# NOTE(review): obfuscated — the parameter name `UpperCamelCase` is repeated
# five times (a SyntaxError) and the bodies read never-bound names (a, b, x,
# batch_size, n_batches, train_dataloader, valid_dataloader). Intended role:
# build train/valid DataLoaders over synthetic y = a*x + b + noise data.
def _snake_case ( UpperCamelCase : Any=2 , UpperCamelCase : Dict=3 , UpperCamelCase : Optional[Any]=16 , UpperCamelCase : int = 10 , UpperCamelCase : int = 2 ):
    """Create a pair of (train, valid) DataLoaders of noisy linear data."""
    def get_dataset(UpperCamelCase : Optional[int] ):
        # random inputs with linear targets plus gaussian noise
        UpperCAmelCase : Optional[int] = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(UpperCamelCase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
    UpperCAmelCase : Tuple = get_dataset(UpperCamelCase )
    UpperCAmelCase : Optional[int] = get_dataset(UpperCamelCase )
    UpperCAmelCase : Tuple = DataLoader(UpperCamelCase , shuffle=UpperCamelCase , batch_size=UpperCamelCase , num_workers=4 )
    UpperCAmelCase : Dict = DataLoader(UpperCamelCase , shuffle=UpperCamelCase , batch_size=UpperCamelCase , num_workers=4 )
    return (train_dataloader, valid_dataloader)
# NOTE(review): obfuscated — duplicate parameter names (SyntaxError); the body
# reads never-bound names (model, dataloader, accelerator, optimizer, rands).
# Intended role: run a few quick epochs of MSE training, recording the RNG
# draws so checkpoint restore determinism can be asserted by the tests below.
def _snake_case ( UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Tuple=None ):
    """Train the model for the given number of epochs; return the random draws."""
    UpperCAmelCase : List[Any] = []
    for epoch in range(UpperCamelCase ):
        # Train quickly
        model.train()
        for batch in dataloader:
            UpperCAmelCase , UpperCAmelCase : Any = batch
            UpperCAmelCase : Union[str, Any] = model(UpperCamelCase )
            UpperCAmelCase : List[str] = torch.nn.functional.mse_loss(UpperCamelCase , UpperCamelCase )
            accelerator.backward(UpperCamelCase )
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random() ) # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Toy scalar linear model ``y = a * x + b`` used by the checkpointing tests."""

    def __init__( self ) -> Tuple:
        """Initialise the two learnable scalar parameters ``a`` and ``b``."""
        # NOTE(review): the original bound both parameters to a throwaway local
        # instead of `self.a` / `self.b`, which this file's tests plainly read
        # (model.a.item(), model.b.item()); attribute assignment restored.
        super().__init__()
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )

    def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
        """Return ``input * a + b``."""
        # NOTE(review): the original body read an unbound name `x`; it now uses
        # its actual parameter.
        return _SCREAMING_SNAKE_CASE * self.a + self.b
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Tests for Accelerate state checkpointing (save_state / load_state).

    NOTE(review): this class is obfuscation-damaged and does not run as-is:
    - every method is named ``SCREAMING_SNAKE_CASE`` (later defs overwrite earlier
      ones), and all call arguments were replaced by the undefined placeholder
      ``_SCREAMING_SNAKE_CASE``;
    - annotated tuple assignments like ``UpperCAmelCase , UpperCAmelCase : T = ...``
      and ``((UpperCAmelCase) , (UpperCAmelCase)) : T = ...`` are SyntaxErrors —
      Python only allows annotating a single target.
    Code is preserved byte-for-byte; only documentation is added. Intended
    behavior below is inferred from the surviving assertions — TODO confirm
    against upstream accelerate `test_state_checkpointing.py`.
    """

    def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        """With total_limit=1, saving twice keeps only one checkpoint directory."""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            UpperCAmelCase : str = DummyModel()
            UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
            UpperCAmelCase , UpperCAmelCase : List[Any] = dummy_dataloaders()
            UpperCAmelCase : Optional[Any] = ProjectConfiguration(total_limit=1 , project_dir=_SCREAMING_SNAKE_CASE , automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE )
            # Train baseline
            UpperCAmelCase : Tuple = Accelerator(project_config=_SCREAMING_SNAKE_CASE )
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = accelerator.prepare(
                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )

    def SCREAMING_SNAKE_CASE ( self ) -> Dict:
        """Training can be resumed from explicitly named checkpoint folders.

        Saves an ``initial`` state, trains, reloads it, and checks model params,
        optimizer state, and RNG-driven draws match across the resume.
        """
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            UpperCAmelCase : List[str] = DummyModel()
            UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
            UpperCAmelCase , UpperCAmelCase : Any = dummy_dataloaders()
            # Train baseline
            UpperCAmelCase : Union[str, Any] = Accelerator()
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = accelerator.prepare(
                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            # Save initial
            UpperCAmelCase : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , """initial""" )
            accelerator.save_state(_SCREAMING_SNAKE_CASE )
            ((UpperCAmelCase) , (UpperCAmelCase)) : Optional[int] = model.a.item(), model.b.item()
            UpperCAmelCase : Union[str, Any] = optimizer.state_dict()
            UpperCAmelCase : List[Any] = train(3 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            ((UpperCAmelCase) , (UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
            UpperCAmelCase : Union[str, Any] = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            UpperCAmelCase : Tuple = DummyModel()
            UpperCAmelCase : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
            UpperCAmelCase , UpperCAmelCase : Optional[Any] = dummy_dataloaders()
            UpperCAmelCase : List[str] = Accelerator()
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = accelerator.prepare(
                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            accelerator.load_state(_SCREAMING_SNAKE_CASE )
            ((UpperCAmelCase) , (UpperCAmelCase)) : Any = model.a.item(), model.b.item()
            UpperCAmelCase : int = optimizer.state_dict()
            self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            UpperCAmelCase : Dict = train(2 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            # Save everything
            UpperCAmelCase : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , """checkpoint""" )
            accelerator.save_state(_SCREAMING_SNAKE_CASE )
            # Load everything back in and make sure all states work
            accelerator.load_state(_SCREAMING_SNAKE_CASE )
            test_rands += train(1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            ((UpperCAmelCase) , (UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
            UpperCAmelCase : Tuple = optimizer.state_dict()
            self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE ( self ) -> int:
        """Same resume test as above, but via automatic checkpoint naming
        (``checkpoints/checkpoint_0``, ``checkpoint_1``) and an iteration offset."""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            UpperCAmelCase : Dict = DummyModel()
            UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
            UpperCAmelCase , UpperCAmelCase : int = dummy_dataloaders()
            UpperCAmelCase : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE )
            # Train baseline
            UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=_SCREAMING_SNAKE_CASE , project_config=_SCREAMING_SNAKE_CASE )
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = accelerator.prepare(
                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            # Save initial
            accelerator.save_state()
            ((UpperCAmelCase) , (UpperCAmelCase)) : int = model.a.item(), model.b.item()
            UpperCAmelCase : Tuple = optimizer.state_dict()
            UpperCAmelCase : Dict = train(3 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            ((UpperCAmelCase) , (UpperCAmelCase)) : Tuple = model.a.item(), model.b.item()
            UpperCAmelCase : str = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            UpperCAmelCase : Tuple = DummyModel()
            UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
            UpperCAmelCase , UpperCAmelCase : Dict = dummy_dataloaders()
            UpperCAmelCase : Any = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE )
            UpperCAmelCase : Dict = Accelerator(project_dir=_SCREAMING_SNAKE_CASE , project_config=_SCREAMING_SNAKE_CASE )
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = accelerator.prepare(
                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            accelerator.load_state(os.path.join(_SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_0""" ) )
            ((UpperCAmelCase) , (UpperCAmelCase)) : int = model.a.item(), model.b.item()
            UpperCAmelCase : Any = optimizer.state_dict()
            self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            UpperCAmelCase : Optional[int] = train(2 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(_SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_1""" ) )
            test_rands += train(1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            ((UpperCAmelCase) , (UpperCAmelCase)) : int = model.a.item(), model.b.item()
            UpperCAmelCase : List[str] = optimizer.state_dict()
            self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        """register_for_checkpointing must reject objects without state_dict/load_state_dict,
        naming the offending argument positions in the raised message."""
        UpperCAmelCase : Tuple = torch.tensor([1, 2, 3] )
        UpperCAmelCase : Dict = torch.tensor([2, 3, 4] )
        UpperCAmelCase : Optional[int] = DummyModel()
        UpperCAmelCase : Optional[Any] = torch.optim.Adam(net.parameters() )
        UpperCAmelCase : Tuple = Accelerator()
        with self.assertRaises(_SCREAMING_SNAKE_CASE ) as ve:
            accelerator.register_for_checkpointing(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        UpperCAmelCase : Tuple = str(ve.exception )
        # The two raw tensors (indices 0 and 1) are invalid; model/optimizer are not.
        self.assertTrue("""Item at index 0""" in message )
        self.assertTrue("""Item at index 1""" in message )
        self.assertFalse("""Item at index 2""" in message )
        self.assertFalse("""Item at index 3""" in message )

    def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        """LR scheduler state is captured by save_state and restored by load_state."""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            UpperCAmelCase : Optional[int] = DummyModel()
            UpperCAmelCase : Tuple = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
            UpperCAmelCase : str = torch.optim.lr_scheduler.StepLR(_SCREAMING_SNAKE_CASE , step_size=1 , gamma=0.99 )
            UpperCAmelCase , UpperCAmelCase : List[str] = dummy_dataloaders()
            UpperCAmelCase : Dict = ProjectConfiguration(automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE )
            # Train baseline
            UpperCAmelCase : Optional[Any] = Accelerator(project_dir=_SCREAMING_SNAKE_CASE , project_config=_SCREAMING_SNAKE_CASE )
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = accelerator.prepare(
                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            # Save initial
            accelerator.save_state()
            UpperCAmelCase : Tuple = scheduler.state_dict()
            train(3 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            self.assertNotEqual(_SCREAMING_SNAKE_CASE , scheduler.state_dict() )
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(_SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_0""" ) )
            self.assertEqual(_SCREAMING_SNAKE_CASE , scheduler.state_dict() )

    def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        """With total_limit=2 and 11 saves, only the two newest checkpoints survive."""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            UpperCAmelCase : List[Any] = DummyModel()
            UpperCAmelCase : Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE , total_limit=2 )
            # Train baseline
            UpperCAmelCase : Optional[int] = Accelerator(project_dir=_SCREAMING_SNAKE_CASE , project_config=_SCREAMING_SNAKE_CASE )
            UpperCAmelCase : List[str] = accelerator.prepare(_SCREAMING_SNAKE_CASE )
            # Save 3 states:
            for _ in range(11 ):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_0""" ) ) )
            self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_9""" ) ) )
            self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_10""" ) ) )

    @require_cuda
    def SCREAMING_SNAKE_CASE ( self ) -> Dict:
        """Re-run this file under torchrun (one process per GPU) to exercise the
        multi-process map_location checks in the __main__ block below."""
        UpperCAmelCase : Union[str, Any] = ["""torchrun""", F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
        execute_subprocess_async(_SCREAMING_SNAKE_CASE , env=os.environ.copy() )
if __name__ == "__main__":
    # Multi-process checkpoint map_location smoke test, intended to run under torchrun
    # (launched by the @require_cuda test above).
    # NOTE(review): obfuscation-damaged — every target is named `A`, the statements
    # `A , A: Any = ...` below are SyntaxErrors (annotated tuple targets), and
    # `pytest` is used without a visible import. Code preserved byte-for-byte.
    A: Dict = "/tmp/accelerate/state_checkpointing"
    A: Optional[int] = DummyModel()
    A: int = torch.optim.Adam(params=model.parameters(), lr=1E-3)
    A: Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    A , A: Any = dummy_dataloaders()
    A: int = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    A: str = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        # Only rank 0 resets the shared checkpoint directory.
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    A , A , A , A , A: Dict = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    A , A: Optional[int] = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        A: Union[str, Any] = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    A: int = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        A: int = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        A: int = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 160 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
# Force deterministic torch/cuda kernels so the pixel-exact assertions below are stable.
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
    """CPU tests for diffusers' AudioDiffusionPipeline.

    NOTE(review): obfuscation-damaged — every method/property is named
    ``_SCREAMING_SNAKE_CASE`` (later defs overwrite earlier ones) while the test
    body reads ``self.dummy_unet`` / ``self.dummy_unet_condition`` /
    ``self.dummy_vqvae_and_unet``, and many call arguments were replaced by the
    undefined placeholder ``_UpperCamelCase``. Code preserved byte-for-byte;
    original property names are inferred from those attribute reads — TODO
    confirm against upstream diffusers tests.
    """

    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
        """Free CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str:
        """Small seeded UNet2DModel fixture (presumably ``dummy_unet``)."""
        torch.manual_seed(0)
        _lowerCamelCase : List[str] = UNetaDModel(
            sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
        return model

    @property
    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
        """Small seeded conditional UNet fixture (presumably ``dummy_unet_condition``)."""
        torch.manual_seed(0)
        _lowerCamelCase : Tuple = UNetaDConditionModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=10 , )
        return model

    @property
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
        """Seeded (vqvae, unet) pair fixture (presumably ``dummy_vqvae_and_unet``)."""
        torch.manual_seed(0)
        _lowerCamelCase : Optional[Any] = AutoencoderKL(
            sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
        _lowerCamelCase : Tuple = UNetaDModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
        return vqvae, unet

    @slow
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
        """End-to-end pipeline run on CPU: unconditional DDPM generation, VQVAE +
        DDIM continuation from raw audio, and encoding-conditioned generation,
        each checked against hard-coded pixel slices."""
        _lowerCamelCase : List[Any] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        _lowerCamelCase : Optional[int] = Mel(
            x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
        _lowerCamelCase : int = DDPMScheduler()
        _lowerCamelCase : str = AudioDiffusionPipeline(vqvae=_UpperCamelCase , unet=self.dummy_unet , mel=_UpperCamelCase , scheduler=_UpperCamelCase)
        _lowerCamelCase : Optional[int] = pipe.to(_UpperCamelCase)
        pipe.set_progress_bar_config(disable=_UpperCamelCase)
        _lowerCamelCase : Any = torch.Generator(device=_UpperCamelCase).manual_seed(42)
        _lowerCamelCase : Union[str, Any] = pipe(generator=_UpperCamelCase , steps=4)
        _lowerCamelCase : List[str] = output.audios[0]
        _lowerCamelCase : Tuple = output.images[0]
        # Same seed with return_dict=False must produce the identical image.
        _lowerCamelCase : Optional[int] = torch.Generator(device=_UpperCamelCase).manual_seed(42)
        _lowerCamelCase : List[str] = pipe(generator=_UpperCamelCase , steps=4 , return_dict=_UpperCamelCase)
        _lowerCamelCase : Optional[int] = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        _lowerCamelCase : Any = np.frombuffer(image.tobytes() , dtype="""uint8""")[:10]
        _lowerCamelCase : str = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""")[:10]
        _lowerCamelCase : int = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0
        # VQVAE + DDIM: continue generation from raw audio starting at step 5.
        _lowerCamelCase : List[Any] = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
        _lowerCamelCase : str = DDIMScheduler()
        _lowerCamelCase : Any = self.dummy_vqvae_and_unet
        _lowerCamelCase : Optional[Any] = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_UpperCamelCase , scheduler=_UpperCamelCase)
        _lowerCamelCase : Optional[Any] = pipe.to(_UpperCamelCase)
        pipe.set_progress_bar_config(disable=_UpperCamelCase)
        np.random.seed(0)
        _lowerCamelCase : List[str] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        _lowerCamelCase : Optional[int] = torch.Generator(device=_UpperCamelCase).manual_seed(42)
        _lowerCamelCase : List[str] = pipe(raw_audio=_UpperCamelCase , generator=_UpperCamelCase , start_step=5 , steps=10)
        _lowerCamelCase : List[Any] = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        _lowerCamelCase : int = np.frombuffer(image.tobytes() , dtype="""uint8""")[:10]
        _lowerCamelCase : Optional[Any] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        # Conditional UNet: generation driven by an encoding tensor.
        _lowerCamelCase : Any = self.dummy_unet_condition
        _lowerCamelCase : int = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=_UpperCamelCase , mel=_UpperCamelCase , scheduler=_UpperCamelCase)
        _lowerCamelCase : Optional[int] = pipe.to(_UpperCamelCase)
        pipe.set_progress_bar_config(disable=_UpperCamelCase)
        np.random.seed(0)
        _lowerCamelCase : Dict = torch.rand((1, 1, 10))
        _lowerCamelCase : List[Any] = pipe(generator=_UpperCamelCase , encoding=_UpperCamelCase)
        _lowerCamelCase : Tuple = output.images[0]
        _lowerCamelCase : int = np.frombuffer(image.tobytes() , dtype="""uint8""")[:10]
        _lowerCamelCase : List[str] = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    """GPU integration test running the pretrained ``teticio/audio-diffusion-ddim-256``
    pipeline and checking audio/image shapes plus a pixel slice.

    NOTE(review): obfuscation-damaged like the class above (shared
    ``_SCREAMING_SNAKE_CASE`` method names, undefined ``_UpperCamelCase``
    placeholders). Code preserved byte-for-byte.
    """

    def _SCREAMING_SNAKE_CASE ( self : str) ->Dict:
        """Free CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[Any]:
        """Seeded generation with the pretrained DDIM pipeline; asserts exact output."""
        _lowerCamelCase : Dict = torch_device
        _lowerCamelCase : Optional[Any] = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""")
        _lowerCamelCase : int = pipe.to(_UpperCamelCase)
        pipe.set_progress_bar_config(disable=_UpperCamelCase)
        _lowerCamelCase : Union[str, Any] = torch.Generator(device=_UpperCamelCase).manual_seed(42)
        _lowerCamelCase : Union[str, Any] = pipe(generator=_UpperCamelCase)
        _lowerCamelCase : Tuple = output.audios[0]
        _lowerCamelCase : List[str] = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        _lowerCamelCase : Dict = np.frombuffer(image.tobytes() , dtype="""uint8""")[:10]
        _lowerCamelCase : int = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case ( __lowerCAmelCase ):
    """Dataset reader that loads text files via the packaged ``Text`` builder
    (datasets' ``TextDatasetReader`` pattern: an AbstractDatasetReader subclass).

    NOTE(review): obfuscation-damaged — all ``__init__`` parameters share the
    name ``_UpperCamelCase`` (a SyntaxError: duplicate argument), and the
    ``read`` method passes the out-of-scope placeholder ``_UpperCamelCase``
    instead of the locals it just set to None. Code preserved byte-for-byte.
    """

    def __init__( self : Dict , _UpperCamelCase : NestedDataStructureLike[PathLike] , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , **_UpperCamelCase : Tuple , ) ->Union[str, Any]:
        """Store paths and construct the underlying Text builder.

        The body references ``path_or_paths`` — presumably the first parameter's
        original name; the others appear to be (split, features, cache_dir,
        keep_in_memory, streaming, num_proc) — TODO confirm upstream.
        """
        super().__init__(
            _UpperCamelCase , split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , num_proc=_UpperCamelCase , **_UpperCamelCase , )
        # Normalize a bare path into a {split: path} mapping.
        _lowerCamelCase : List[Any] = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase) else {self.split: path_or_paths}
        _lowerCamelCase : Any = Text(
            cache_dir=_UpperCamelCase , data_files=_UpperCamelCase , features=_UpperCamelCase , **_UpperCamelCase , )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]:
        """Materialize the dataset: streaming (iterable) or downloaded map-style."""
        # Build iterable dataset
        if self.streaming:
            _lowerCamelCase : Tuple = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            _lowerCamelCase : List[Any] = None
            _lowerCamelCase : Any = None
            _lowerCamelCase : List[str] = None
            _lowerCamelCase : Dict = None
            self.builder.download_and_prepare(
                download_config=_UpperCamelCase , download_mode=_UpperCamelCase , verification_mode=_UpperCamelCase , base_path=_UpperCamelCase , num_proc=self.num_proc , )
            _lowerCamelCase : Optional[int] = self.builder.as_dataset(
                split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory)
        return dataset
| 15 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE_(nums: list[int], target: int) -> list[int]:
    """Two-pointer two-sum on a sorted list.

    NOTE(review): restored from obfuscation damage — both parameters shared one
    name (a SyntaxError) and the two index initializations were assigned to a
    dead ``_A`` local while the loop read ``i``/``j``.

    Args:
        nums: list of integers sorted in ascending order.
        target: the desired pair sum.

    Returns:
        ``[i, j]`` with ``i < j`` and ``nums[i] + nums[j] == target``, or ``[]``
        if no such pair exists.
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            # Sum too small: advance the left pointer to a larger value.
            i = i + 1
        else:
            # Sum too large: retreat the right pointer to a smaller value.
            j = j - 1
    return []
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the original f-string referenced an undefined name `two_pointer`
    # (a NameError at runtime); call the function actually defined above.
    print(f"{SCREAMING_SNAKE_CASE_([2, 7, 11, 15], 9) = }")
| 2 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): all four module constants below were obfuscated to the same name
# `_UpperCAmelCase`, so each assignment clobbers the previous one; the class below
# expects the original names VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES. Code preserved byte-for-byte.
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
# Name of the serialized SentencePiece model inside a checkpoint directory.
_UpperCAmelCase : List[str] = {"""vocab_file""": """spiece.model"""}
# Download URLs for the pretrained vocabulary files.
_UpperCAmelCase : Tuple = {
    """vocab_file""": {
        """bert_for_seq_generation""": (
            """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
        ),
    }
}
# Maximum model input length (positional embeddings size).
_UpperCAmelCase : List[str] = {"""bert_for_seq_generation""": 5_12}
class lowerCAmelCase ( __UpperCamelCase ):
    """SentencePiece-based tokenizer (the BertGeneration tokenizer pattern).

    NOTE(review): obfuscation-damaged and non-functional as-is:
    - the five class attributes all share the name ``UpperCAmelCase__`` (each
      assignment overwrites the previous); upstream they are distinct
      (vocab_files_names, pretrained maps, model_input_names, ...);
    - ``__init__`` repeats the parameter name ``UpperCAmelCase`` (a SyntaxError:
      duplicate argument) while the body reads the real names (``vocab_file``,
      ``sp_model_kwargs``);
    - six methods share the name ``A_`` (later defs overwrite earlier ones).
    Code preserved byte-for-byte; only documentation is added.
    """

    UpperCAmelCase__ = VOCAB_FILES_NAMES
    UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ = []
    UpperCAmelCase__ = ["""input_ids""", """attention_mask"""]

    def __init__( self : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : int="<s>" , UpperCAmelCase : Any="</s>" , UpperCAmelCase : List[Any]="<unk>" , UpperCAmelCase : Any="<pad>" , UpperCAmelCase : Dict="<::::>" , UpperCAmelCase : Optional[Dict[str, Any]] = None , **UpperCAmelCase : Any , ) -> None:
        """Load the SentencePiece model from ``vocab_file`` and register special tokens."""
        lowerCamelCase__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , sep_token=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
        lowerCamelCase__ : List[str] = vocab_file
        lowerCamelCase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(UpperCAmelCase )

    @property
    def A_ ( self : List[str] ) -> Optional[int]:
        """Vocabulary size (presumably the ``vocab_size`` property upstream)."""
        return self.sp_model.get_piece_size()

    def A_ ( self : Tuple ) -> Any:
        """Return token->id mapping including added tokens (``get_vocab``)."""
        lowerCamelCase__ : Tuple = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : Union[str, Any] ) -> Optional[Any]:
        """Drop the unpicklable SentencePiece processor before pickling."""
        lowerCamelCase__ : List[str] = self.__dict__.copy()
        lowerCamelCase__ : List[Any] = None
        return state

    def __setstate__( self : Dict , UpperCAmelCase : Optional[int] ) -> List[Any]:
        """Restore state and re-load the SentencePiece processor from disk."""
        lowerCamelCase__ : Optional[int] = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            lowerCamelCase__ : Dict = {}
        lowerCamelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def A_ ( self : Dict , UpperCAmelCase : str ) -> List[str]:
        """Tokenize a string into SentencePiece pieces (``_tokenize``)."""
        return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase )

    def A_ ( self : str , UpperCAmelCase : Tuple ) -> Union[str, Any]:
        """Convert a token (piece) to its integer id."""
        return self.sp_model.piece_to_id(UpperCAmelCase )

    def A_ ( self : Optional[Any] , UpperCAmelCase : Tuple ) -> Dict:
        """Convert an integer id back to its token (piece)."""
        lowerCamelCase__ : str = self.sp_model.IdToPiece(UpperCAmelCase )
        return token

    def A_ ( self : Optional[Any] , UpperCAmelCase : int ) -> Union[str, Any]:
        """Join a token sequence back into a string (``convert_tokens_to_string``)."""
        lowerCamelCase__ : str = []
        lowerCamelCase__ : List[str] = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(UpperCAmelCase ) + token
                lowerCamelCase__ : Tuple = []
            else:
                current_sub_tokens.append(UpperCAmelCase )
        out_string += self.sp_model.decode(UpperCAmelCase )
        return out_string.strip()

    def A_ ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
        """Write the SentencePiece model into ``save_directory`` (``save_vocabulary``)."""
        if not os.path.isdir(UpperCAmelCase ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCamelCase__ : Any = os.path.join(
            UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , UpperCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            # Fall back to serializing the in-memory model when no file exists on disk.
            with open(UpperCAmelCase , 'wb' ) as fi:
                lowerCamelCase__ : int = self.sp_model.serialized_model_proto()
                fi.write(UpperCAmelCase )
        return (out_vocab_file,)
| 295 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both constants were obfuscated to the same name, so the config
# archive map below overwrites the logger. Code preserved byte-for-byte.
__lowerCAmelCase = logging.get_logger(__name__)
# Pretrained SEW-D checkpoint -> config URL map.
__lowerCAmelCase = {
    '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _lowerCAmelCase(_lowercase):
    """Configuration class for a SEW-D model (stores model hyper-parameters).

    NOTE(review): restored from obfuscation damage — every ``__init__`` parameter
    was renamed to ``a`` (a SyntaxError: duplicate argument), assignment targets
    became a dead ``lowercase`` local instead of ``self.<attr>``, ``A_``
    placeholders replaced real argument names, and stray ``| 700 |`` residue was
    fused onto the last line. Parameter names/order are recovered from the body's
    right-hand sides; defaults match the source byte-for-byte and agree with the
    public ``transformers.SEWDConfig`` signature.
    """

    __lowerCAmelCase : Union[str, Any] = '''sew-d'''  # presumably the upstream ``model_type`` attribute

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv specs must describe the same number of extractor layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect.'''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
                f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
                f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def _lowerCAmelCase(self):
        """Total stride of the conv feature extractor, i.e. input frames per output
        frame (upstream name presumably ``inputs_to_logits_ratio``)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both constants were obfuscated to the same name, so the config
# archive map below overwrites the logger. Code preserved byte-for-byte.
__lowerCAmelCase = logging.get_logger(__name__)
# Pretrained ViT-MSN checkpoint -> config URL map.
__lowerCAmelCase = {
    '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _lowerCAmelCase(__snake_case):
    """Configuration class for a ViT-MSN model (stores model hyper-parameters).

    NOTE(review): restored from obfuscation damage — every ``__init__`` parameter
    was renamed to ``a`` (a SyntaxError: duplicate argument) and every assignment
    target became a dead ``lowercase`` local instead of ``self.<attr>``; stray
    ``| 396 | 0 |`` residue was fused onto the last line. Parameter names/order
    are recovered from the body's right-hand sides; defaults match the source
    byte-for-byte and agree with the public ``transformers.ViTMSNConfig``.
    """

    __lowerCAmelCase : Tuple = '''vit_msn'''  # presumably the upstream ``model_type`` attribute

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
# NOTE(review): the three constants below were obfuscated to the same name
# `SCREAMING_SNAKE_CASE_`, so each assignment clobbers the previous one
# (logger, then base URL, then the checkpoint map). Code preserved byte-for-byte.
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# Base URL under which the original OpenAI Jukebox checkpoints are hosted.
SCREAMING_SNAKE_CASE_ = """https://openaipublic.azureedge.net/jukebox/models/"""
# Converted model name -> list of original checkpoint files to download.
SCREAMING_SNAKE_CASE_ = {
    """jukebox-1b-lyrics""": [
        """5b/vqvae.pth.tar""",
        """5b/prior_level_0.pth.tar""",
        """5b/prior_level_1.pth.tar""",
        """1b_lyrics/prior_level_2.pth.tar""",
    ],
    """jukebox-5b-lyrics""": [
        """5b/vqvae.pth.tar""",
        """5b/prior_level_0.pth.tar""",
        """5b/prior_level_1.pth.tar""",
        """5b_lyrics/prior_level_2.pth.tar""",
    ],
}
def replace_key(key):
    """Translate one OpenAI Jukebox state-dict key into the transformers scheme.

    Fixes vs. previous revision: the def name now matches its call site
    (``replace_key``), and each ``.replace`` result is assigned back to
    ``key`` instead of being discarded into a throwaway local.
    """
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rewrite every key of ``state_dict`` to the transformers naming scheme.

    Args:
        state_dict: original Jukebox checkpoint dict (key -> tensor).
        model_state_dict: target model's state dict, used to validate that
            each converted key (prefixed with ``key_prefix``) exists and has
            a matching shape.
        key_prefix: "vqvae" or "priors.N", prepended when validating.
        mapping: dict mutated in place, new key -> original key.

    Returns:
        A new dict with converted keys and the original values.

    Fixes vs. previous revision: the four parameters all shared one
    duplicated name (a SyntaxError) and every ``key = ...`` rewrite was
    discarded into a throwaway local.
    """
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        # Final per-token renames handled by the module-level helper.
        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the OpenAI Jukebox checkpoints for ``model_name``, convert
    their state dicts, load them into a ``JukeboxModel`` and save it to
    ``pytorch_dump_folder_path`` (also writing a ``mapping.json`` of
    new-key -> original-key).

    Fixes vs. previous revision: both parameters shared one duplicated name
    (a SyntaxError) and every local binding (``r``, ``model_to_convert``,
    ``config``, ``model``, ``new_dic`` ...) was discarded into a throwaway
    name and then read back under its real name.
    """
    # Fetch any checkpoint file we do not have locally yet.
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        # First file is the VQ-VAE, the rest are the priors (top level first).
        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    # Fixes vs. previous revision: the parser and parsed args were bound to a
    # throwaway name, so `parser` / `args` were undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
# Module logger; the feature extractor below reads the name `logger`.
logger = logging.get_logger(__name__)
class snake_case_ ( SequenceFeatureExtractor ):
    r"""
    Feature extractor for SpeechT5-style speech models.

    Converts raw mono audio into padded waveforms (``audio``) and/or log-mel
    spectrogram targets (``audio_target``).

    Fixes vs. previous revision: the base class was an undefined name
    (``SequenceFeatureExtractor`` is what this file imports), several defs
    had all parameters collapsed to one duplicated name (a SyntaxError),
    four methods shared a single garbled name and shadowed each other while
    internal calls used the real names, ``self.`` assignments were discarded
    into a throwaway local, and ``np.floataa`` / ``np.intaa`` are not NumPy
    dtypes (restored to float32/float64/int32 as the surrounding checks imply).
    """

    # Tensor names this extractor produces.
    # NOTE(review): restored attribute name — SequenceFeatureExtractor
    # conventionally reads `model_input_names`; confirm against the base class.
    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0.0,
        do_normalize=False,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        frame_signal_scale=1.0,
        fmin=80,
        fmax=7_600,
        mel_floor=1e-10,
        reduction_factor=2,
        return_attention_mask=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        # Derived quantities; excluded from serialization in `to_dict`.
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """Normalize each vector to zero mean / unit variance over its valid length."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    # Positions beyond the valid length are reset to the pad value.
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def _extract_mel_features(self, one_waveform):
        """Compute a log10 mel spectrogram; returned as (frames, num_mel_bins)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T

    def __call__(
        self,
        audio=None,
        audio_target=None,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        """Process `audio` (waveform inputs) and/or `audio_target` (mel targets).

        When both are given, the target features are attached to the inputs as
        ``labels`` / ``decoder_attention_mask``.
        """
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def _process_audio(
        self,
        speech,
        is_target=False,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        **kwargs,
    ):
        """Shared pipeline for both inputs and targets: batch, (mel-)extract,
        pad, dtype-normalize and optionally tensor-convert."""
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(speech, dtype=np.float32) for speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self):
        """Serialize, dropping attributes derived from the other properties."""
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
'''simple docstring'''
def hamming(n_element):
    """Return the first ``n_element`` Hamming numbers (2^i * 3^j * 5^k).

    Raises:
        ValueError: if ``n_element`` is less than 1.

    Fixes vs. previous revision: the def name now matches its call site
    (``hamming``), and the parameter/locals are bound under the names the
    body actually reads (``n_element``, ``hamming_list``, ``i``/``j``/``k``).
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    # i, j, k index the smallest element whose multiple by 2/3/5 is still new.
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
if __name__ == "__main__":
    # Interactive driver. Fixes vs. previous revision: the input and result
    # were bound to a throwaway name, so `n` / `hamming_numbers` were undefined.
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    """Helper that builds DetaImageProcessor kwargs and expected output shapes.

    Fixes vs. previous revision: the class/method names now match the call
    sites in the test class below, the ``__init__`` parameters all shared one
    duplicated name (a SyntaxError), and the ``self.`` assignments were
    discarded into a throwaway local.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Kwargs used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after shortest-edge resizing.

        For a batch, returns the per-axis maximum over the individual images
        (the padded batch shape).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Unit tests for the DETA image processor.

    Fixes vs. previous revision: the mixin base was an undefined name
    (``ImageProcessingSavingTestMixin`` is what this file imports), every test
    method shared one garbled name so they shadowed each other and only the
    last was ever run, ``setUp`` discarded the tester instance instead of
    binding ``self.image_processor_tester``, and several expected values were
    an undefined name instead of the intended literal/tensor.
    """

    # Class under test; None when vision deps are unavailable.
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1_333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39_769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 368 | 1 |
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str, second_str):
    """Return True when the two strings are anagrams of each other,
    ignoring case and whitespace.

    Fixes vs. previous revision: the def name now matches its call site
    (``check_anagrams``), the two parameters shared one duplicated name
    (a SyntaxError), the normalized strings were discarded into a throwaway
    local, and the counter was ``defaultdict`` of an undefined name instead
    of ``defaultdict(int)``.
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character position, increment for the first string and
    # decrement for the second; anagrams leave every count at zero.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Fixes vs. previous revision: the inputs and result were bound to a
    # throwaway name, so `input_a` / `input_b` / `status` were undefined.
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Minimal Beam-based builder with a single string feature.

    Fixes vs. previous revision: the class name now matches its call sites
    in the tests (``DummyBeamDataset``), the builder hooks carry the names
    the datasets framework invokes (``_info`` / ``_split_generators`` /
    ``_build_pcollection``), and duplicated/undefined parameter names were
    restored.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            # No (input, target) supervision tuple for this dummy dataset.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Beam-based builder whose feature is a nested sequence, for testing
    nested schemas.

    Fixes vs. previous revision: previously this class reused the same
    garbled name as the builder above (shadowing it), the builder hooks were
    all named identically, and ``supervised_keys`` was an undefined name.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    """Return (index, example) pairs for the flat dummy dataset.

    Fix vs. previous revision: the def name now matches its call sites
    (``get_test_dummy_examples``).
    """
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def a__ ( ) -> list:
    """Nested test fixtures: ``(index, {"a": {"b": [value]}})`` pairs.

    Fix: the previous return annotation ``Optional[Any]`` used undefined names
    (``typing`` is never imported), raising NameError at import time.
    NOTE(review): this shadows the flat ``a__`` defined just above — upstream
    these are presumably two distinctly named helpers; confirm.
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class lowerCamelCase_ ( TestCase ):
    """Integration tests for the Beam-based builders above.

    Fixes: the base class was the undefined name ``__a`` (``TestCase`` is
    imported at the top of this file and otherwise unused), and every
    assignment target was the obfuscated ``UpperCAmelCase__`` while the bodies
    read ``builder``/``dset``/``expected_num_examples`` — all NameErrors.
    NOTE(review): the four methods share the obfuscated name ``lowercase_``
    and shadow one another, and ``DummyBeamDataset``/``NestedBeamDataset``
    refer to the builder classes above, which are themselves obfuscated to
    ``lowerCamelCase_`` — confirm intended names upstream.
    """

    @require_beam
    def lowercase_ ( self ):
        """End-to-end download_and_prepare + as_dataset on the flat builder."""
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner='''DirectRunner''' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , expected_num_examples )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , expected_num_examples )
            self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset

    @require_beam
    def lowercase_ ( self ):
        """Sharded parquet path: patch WriteToParquet to force two shards."""
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner='''DirectRunner''' )
            with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            # NOTE(review): both existence checks target shard 00000; the
            # second presumably should check shard 00001 — confirm upstream.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , expected_num_examples )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , expected_num_examples )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset

    @require_beam
    def lowercase_ ( self ):
        """No beam_runner configured -> download_and_prepare must raise."""
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )

    @require_beam
    def lowercase_ ( self ):
        """End-to-end prepare + read on the nested-feature builder."""
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir , beam_runner='''DirectRunner''' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , expected_num_examples )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , expected_num_examples )
            self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset
| 75 | 1 |
'''simple docstring'''
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def lowerCAmelCase ( *UpperCamelCase__ ):
    """Print the given arguments while holding an exclusive ``flock`` on this
    source file, so concurrent ranks do not interleave their output.

    Fixes: the obfuscated body tried to ``open()`` and ``flock()`` the args
    tuple itself (a TypeError); the lock must be taken on a file handle. The
    previous ``Optional[Any]`` annotation was also an undefined name here.
    """
    with open(__file__ , '''r''' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*UpperCamelCase__ )
        finally:
            # Always release the lock, even if print raises.
            fcntl.flock(fh , fcntl.LOCK_UN )
# Per-rank GPU/NCCL smoke test; run under a distributed launcher (torchrun,
# deepspeed, ...) that sets LOCAL_RANK for each process.
# Fixes: every assignment target was the obfuscated ``__lowerCAmelCase`` while
# the statements below read local_rank/device/hostname/gpu/rank/world_size
# (NameError), and the locking print helper above is named ``lowerCAmelCase``
# in this file, not ``printflock``.
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)

hostname = socket.gethostname()
gpu = F"""[{hostname}-{local_rank}]"""

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    lowerCAmelCase(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
    dist.barrier()
    if rank == 0:
        lowerCAmelCase(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")

except Exception:
    lowerCAmelCase(F"""{gpu} is broken""")
    raise
| 704 | '''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase ( ):
    """Simulate an allocator failure by raising the canonical CUDA OOM error."""
    message = '''CUDA out of memory.'''
    raise RuntimeError(message)
class A ( nn.Module ):
    """Tiny Linear -> BatchNorm1d -> Linear model used by the memory tests.

    Fixes: the three submodules were assigned to a local name
    (``__UpperCAmelCase``) instead of instance attributes, ``nn.BatchNormad``
    is not a real layer (``nn.BatchNorm1d`` intended), and the forward method
    read never-assigned ``self.lineara`` attributes. The forward pass is also
    defined as ``forward`` so ``nn.Module.__call__`` can dispatch to it.
    """

    def __init__( self ):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )

    def forward( self , x ):
        """Map a (batch, 3) input to a (batch, 5) output."""
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class A ( unittest.TestCase ):
    """Tests for find_executable_batch_size / release_memory.

    Fixes: obfuscated assignment targets (``__UpperCAmelCase``) left the names
    the bodies read (``batch_sizes``, ``batch_size``, ``bs``, ``arga``,
    ``model`` ...) undefined; the OOM helper in this file is named
    ``lowerCAmelCase`` (``raise_fake_out_of_memory`` does not exist); the
    nn.Module in this file is named ``A`` (``ModelForTest`` does not exist);
    and ``assertRaises(__a)`` used an undefined name — the concrete exception
    types (RuntimeError / TypeError / ValueError) are asserted instead.
    NOTE(review): all five test methods share the name ``snake_case__`` and
    shadow each other, so unittest only ever sees the last one — the distinct
    upstream ``test_*`` names should be restored.
    """

    def snake_case__( self ):
        """Batch size halves from 128 until the wrapped function stops raising at 8."""
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=1_2_8 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                lowerCAmelCase()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [1_2_8, 6_4, 3_2, 1_6, 8] )

    def snake_case__( self ):
        """Extra positional args are forwarded and the final result returned."""
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=1_2_8 )
        def mock_training_loop_function(batch_size , arga ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                lowerCAmelCase()
            return batch_size, arga

        bs , arga = mock_training_loop_function('''hello''' )
        self.assertListEqual(batch_sizes , [1_2_8, 6_4, 3_2, 1_6, 8] )
        self.assertListEqual([bs, arga] , [8, '''hello'''] )

    def snake_case__( self ):
        """Starting at zero must fail immediately."""
        @find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(batch_size ):
            pass

        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )

    def snake_case__( self ):
        """Always-raising function exhausts every batch size down to zero."""
        @find_executable_batch_size(starting_batch_size=1_6 )
        def mock_training_loop_function(batch_size ):
            if batch_size > 0:
                lowerCAmelCase()
            pass

        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )

    def snake_case__( self ):
        """Passing batch_size explicitly is a TypeError with a helpful message."""
        @find_executable_batch_size(starting_batch_size=1_2_8 )
        def mock_training_loop_function(batch_size , arga , argb ):
            if batch_size != 8:
                lowerCAmelCase()

        with self.assertRaises(TypeError ) as cm:
            mock_training_loop_function(1_2_8 , '''hello''' , '''world''' )
        self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
        self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )

    def snake_case__( self ):
        """Unrelated exceptions propagate untouched."""
        @find_executable_batch_size(starting_batch_size=1_6 )
        def mock_training_loop_function(batch_size ):
            raise ValueError('''Oops, we had an error!''' )

        with self.assertRaises(ValueError ) as cm:
            mock_training_loop_function()
        self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )

    @require_cuda
    def snake_case__( self ):
        """release_memory should return CUDA allocation to its baseline."""
        starting_memory = torch.cuda.memory_allocated()
        model = A()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
| 654 | 0 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Fix: the repo path was assigned to the obfuscated name ``UpperCAmelCase__``
# while the insert below read the undefined ``git_repo_path``.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def A ( UpperCamelCase_ ) -> None:
    '''Pytest ``pytest_addoption``-style shim: register transformers' shared
    command-line options on the parser object ``UpperCamelCase_``.

    NOTE(review): the original annotations (``Tuple``/``Optional[int]``) were
    undefined names in this module and would raise NameError at definition
    time; they are removed here (annotation-only change).
    '''
    # Imported lazily so this conftest can be collected without importing
    # transformers at module load time.
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(UpperCamelCase_ )
def A ( UpperCamelCase_ ):
    """Pytest ``pytest_terminal_summary``-style shim: emit transformers' extra
    test reports when ``--make-reports`` was passed on the command line.

    Fixes: the body read the undefined name ``terminalreporter`` (the reporter
    is the ``UpperCamelCase_`` parameter) and forwarded the reporter object as
    the report ``id`` instead of the ``--make-reports`` option value.
    NOTE(review): this shadows the ``def A`` just above; upstream these are two
    distinct pytest hooks — confirm intended names.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = UpperCamelCase_.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(UpperCamelCase_ , id=make_reports )
| 48 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__lowercase : str = logging.getLogger(__name__)
class __UpperCamelCase ( TokenClassificationTask ):
    """NER task over CoNLL-style whitespace-separated files: each line is
    ``token col1 col2 ...`` and the tag is read from column ``label_idx``
    (the last column by default).

    Fixes: the base class was the undefined name ``lowerCAmelCase_``
    (``TokenClassificationTask`` is imported above and otherwise unused), and
    the first two methods declared duplicate ``__a`` parameters — a
    SyntaxError; the parameters now carry the names their bodies read.
    NOTE(review): the three methods share the obfuscated name
    ``__UpperCAmelCase`` and shadow one another — confirm intended names.
    """

    def __init__( self , __a=-1 ):
        # Column index (negative = from the right) that holds the tag.
        self.label_idx = __a

    def __UpperCAmelCase ( self , data_dir , mode ):
        """Parse ``{mode}.txt`` under ``data_dir`` into InputExample objects."""
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f"""{mode}.txt""" )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                    # Sentence boundary: flush the accumulated tokens.
                    if words:
                        examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=words , labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(' ' )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace('\n' , '' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O' )
            if words:
                examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=words , labels=labels ) )
        return examples

    def __UpperCAmelCase ( self , writer , test_input_reader , preds_list ):
        """Write per-token predictions next to the input tokens, preserving
        document markers and blank lines."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
                writer.write(output_line )
            else:
                # Token had no prediction (sequence was truncated by the model).
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )

    def __UpperCAmelCase ( self , path ):
        """Read the label set from ``path`` (one per line); guarantee an 'O'
        label; default to the CoNLL-2003 tag set."""
        if path:
            with open(path , 'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __UpperCamelCase ( __UpperCamelCase ):
    """Chunking task: same file format as NER, but the tag is taken from the
    second-to-last column (``label_idx=-2``).

    Fix: the base class was the undefined name ``lowerCAmelCase_``; since
    ``super().__init__(label_idx=-2)`` needs the NER ``__init__`` above, the
    base is the previous class (which the obfuscation also named
    ``__UpperCamelCase`` — at class-creation time that name still refers to
    the NER class).  NOTE(review): confirm the distinct upstream class names.
    """

    def __init__( self ):
        super().__init__(label_idx=-2 )

    def __UpperCAmelCase ( self , __a ):
        """Return chunk labels from file ``__a`` (one per line, with 'O'
        guaranteed) or the default CoNLL-2000 chunk tag set."""
        if __a:
            with open(__a , 'r' ) as f:
                chunk_labels = f.read().splitlines()
            if "O" not in chunk_labels:
                chunk_labels = ['O'] + chunk_labels
            return chunk_labels
        return [
            "O",
            "B-ADVP",
            "B-INTJ",
            "B-LST",
            "B-PRT",
            "B-NP",
            "B-SBAR",
            "B-VP",
            "B-ADJP",
            "B-CONJP",
            "B-PP",
            "I-ADVP",
            "I-INTJ",
            "I-LST",
            "I-PRT",
            "I-NP",
            "I-SBAR",
            "I-VP",
            "I-ADJP",
            "I-CONJP",
            "I-PP",
        ]
class __UpperCamelCase ( TokenClassificationTask ):
    """POS task over CoNLL-U files parsed with ``conllu.parse_incr``.

    Fixes: the base class was the undefined name ``lowerCAmelCase_``
    (``TokenClassificationTask`` is the imported base), and both I/O methods
    declared duplicate ``__a`` parameters — a SyntaxError; the parameters now
    carry the names their bodies actually read.
    NOTE(review): the three methods share the obfuscated name
    ``__UpperCAmelCase`` and shadow one another — confirm intended names.
    """

    def __UpperCAmelCase ( self , data_dir , mode ):
        """Parse ``{mode}.txt`` under ``data_dir`` into InputExample objects,
        one per CoNLL-U sentence, labelled with universal POS tags."""
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f"""{mode}.txt""" )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['form'] )
                    labels.append(token['upos'] )
                assert len(words ) == len(labels )
                if words:
                    examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=words , labels=labels ) )
                    guid_index += 1
        return examples

    def __UpperCAmelCase ( self , writer , test_input_reader , preds_list ):
        """Write each sentence as ``form (upos|prediction)`` tuples."""
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = ''
            for token in sentence:
                out += f"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """
            out += "\n"
            writer.write(out )
            example_id += 1

    def __UpperCAmelCase ( self , path ):
        """Read the label set from ``path`` or return the 17 universal POS tags."""
        if path:
            with open(path , 'r' ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 476 | 0 |
"""Lazy import scaffolding for the NLLB-MoE model (configuration + PyTorch modeling)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Fixes: every assignment was bound to ``__snake_case`` (clobbering the dict
# with a list and leaving ``_import_structure`` undefined at the bottom), the
# module-level annotations (``Optional[Any]`` etc.) used names that are never
# imported (NameError), the lazy module was never installed into
# ``sys.modules``, and ``NllbMoeTopaRouter`` is a typo for ``NllbMoeTop2Router``.
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

# The modeling module is only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Fix: these module globals were all bound to ``__snake_case`` (each line
# rebinding the same name) while the tokenizer class below reads ``logger``,
# ``VOCAB_FILES_NAMES``, ``PRETRAINED_VOCAB_FILES_MAP`` and
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``; the broken annotations
# (``Optional[Any]`` etc. are undefined here) are dropped as well.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 2_56,
}
class _UpperCAmelCase ( PreTrainedTokenizer ):
    """SentencePiece-based tokenizer for RemBERT.

    Fixes: the base class was the undefined name ``lowerCAmelCase__``
    (``PreTrainedTokenizer`` is imported above and otherwise unused); every
    method declared duplicate ``lowerCAmelCase_`` parameters — a SyntaxError;
    ``__init__`` dropped its configuration into locals instead of instance
    attributes; and several bodies read never-bound names (``state``, ``d``,
    ``token_ids_a``, the comprehension index).
    NOTE(review): the instance methods still share the obfuscated name
    ``_lowerCAmelCase`` (shadowing one another) and the three class attributes
    are all named ``a_`` — the upstream names (``vocab_files_names``,
    ``vocab_size``, ``get_vocab``, ``save_vocabulary``, ...) must be restored
    for the tokenizer to actually function.
    """

    a_ = VOCAB_FILES_NAMES
    a_ = PRETRAINED_VOCAB_FILES_MAP
    a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=True , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file )

    @property
    def _lowerCAmelCase ( self ):
        """Size of the SentencePiece vocabulary."""
        return len(self.sp_model )

    def _lowerCAmelCase ( self ):
        """Full token -> id vocabulary, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        # The SentencePiece processor is not picklable; drop it here and
        # rebuild it in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )

    def _lowerCAmelCase ( self , text , sample=False ):
        """Tokenize ``text`` into SentencePiece pieces (``sample`` is unused)."""
        pieces = self.sp_model.EncodeAsPieces(text )
        return pieces

    def _lowerCAmelCase ( self , token ):
        """Token (str) -> vocabulary id."""
        return self.sp_model.PieceToId(token )

    def _lowerCAmelCase ( self , index ):
        """Vocabulary id -> token (str)."""
        return self.sp_model.IdToPiece(index )

    def _lowerCAmelCase ( self , tokens ):
        """Join SentencePiece pieces back into a plain string."""
        out_string = self.sp_model.decode_pieces(tokens )
        return out_string

    def _lowerCAmelCase ( self , token_ids_0 , token_ids_1 = None ):
        """``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def _lowerCAmelCase ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """Mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]

    def _lowerCAmelCase ( self , token_ids_0 , token_ids_1 = None ):
        """Segment ids: 0 for sequence A (incl. specials), 1 for sequence B."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def _lowerCAmelCase ( self , save_directory , filename_prefix = None ):
        """Copy the sentencepiece model file into ``save_directory``."""
        if not os.path.isdir(save_directory ):
            logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )

        return (out_vocab_file,)
| 460 | 0 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 9 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
# Tiny teacher checkpoints used by the tests below.
# NOTE(review): both constants share the name ``lowercase_`` so the second
# assignment shadows the first (the BART id becomes unreachable) — upstream
# these are presumably two distinct constants; confirm intended names.
lowercase_ = "sshleifer/bart-tiny-random"
lowercase_ = "patrickvonplaten/t5-tiny-random"
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
    """Tests for create_student_by_copying_alternating_layers.

    Fixes: annotated starred assignments (``a, *b : T = ...``) are a
    SyntaxError in Python and the name ``a_`` was undefined everywhere; calls
    now use the module constant ``lowercase_`` and plain tuple unpacking, and
    the final assertRaises uses a concrete exception class.
    NOTE(review): upstream distinguishes a BART and a T5 checkpoint, but both
    module constants here are named ``lowercase_`` (only the T5 id survives);
    all methods are also named ``a`` and shadow each other (including the
    ``teacher_config`` property that ``self.teacher_config`` expects) —
    confirm the intended distinct names.
    """

    @cached_property
    def a ( self ):
        """Teacher config used for cross-checking layer counts."""
        return AutoConfig.from_pretrained(lowercase_ )

    def a ( self ):
        student , *_ = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )

    def a ( self ):
        student , *_ = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=None )

    def a ( self ):
        student , *_ = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=None )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )

    def a ( self ):
        student , *_ = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )

    def a ( self ):
        with self.assertRaises(AssertionError ):
            create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=None , d=None )
| 470 | 0 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[list[int]] ):
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(snake_case_ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(snake_case_ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
    import doctest

    # Fix: stray table residue ("| 678 |") was fused onto this line in the
    # source, making it a SyntaxError; simply run the module doctests.
    doctest.testmod()
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = []
__magic_name__ = 1
while len(snake_case_ ) < 1E6:
constant.append(str(snake_case_ ) )
i += 1
__magic_name__ = ''''''.join(snake_case_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
    # Fix: the solver above is named ``_SCREAMING_SNAKE_CASE`` (``solution``
    # is undefined here), and stray table residue was fused onto this line.
    print(_SCREAMING_SNAKE_CASE())
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
# Fix: every module global below was bound to ``__lowerCAmelCase`` while the
# functions further down read TRANSFORMERS_PATH / transformers_module /
# _re_*_models / PIPELINE_TAGS_AND_AUTO_MODELS; the broken annotations
# (``Any`` etc. are undefined names here) are dropped, and the
# ``MODEL_FOR_FOR_VISION_2_SEQ`` typo in the image-to-text entry is corrected.
TRANSFORMERS_PATH = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
    ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
    ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
    ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
    ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
    ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
    ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
    ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
    ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
    ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
    (
        "zero-shot-object-detection",
        "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
        "AutoModelForZeroShotObjectDetection",
    ),
    ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
    ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
    ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
    ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
    (
        "table-question-answering",
        "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForTableQuestionAnswering",
    ),
    ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
    ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
    (
        "next-sentence-prediction",
        "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
        "AutoModelForNextSentencePrediction",
    ),
    (
        "audio-frame-classification",
        "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForAudioFrameClassification",
    ),
    ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
    (
        "document-question-answering",
        "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForDocumentQuestionAnswering",
    ),
    (
        "visual-question-answering",
        "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForVisualQuestionAnswering",
    ),
    ("image-to-text", "MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
    (
        "zero-shot-image-classification",
        "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForZeroShotImageClassification",
    ),
    ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
    ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
    ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def lowerCAmelCase ( UpperCamelCase__ : str ):
    """Split a camel-cased identifier into its component words.

    Fix: the finditer result was bound to an obfuscated throwaway name while
    the return statement read the undefined ``matches`` — a NameError.

    >>> lowerCAmelCase("TFBertModel")
    ['TF', 'Bert', 'Model']
    """
    matches = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , UpperCamelCase__ )
    return [m.group(0 ) for m in matches]
def lowerCAmelCase ( ):
    """Build the model-type table: one row per model type with PT/TF/Flax
    support flags and the preferred processing class.

    Fixes: every assignment target was the obfuscated ``__UpperCAmelCase``
    while later statements read ``config_maping_names``/``pt_models``/
    ``model_prefix_to_model_type``/... — all undefined; the backend flag was
    never stored into the lookup dict; and the final DataFrame was built from
    an undefined name instead of the populated ``data`` dict.
    NOTE(review): this definition shadows the camel-case splitter above (also
    obfuscated to ``lowerCAmelCase``); the ``camel_case_split`` call below
    presumably targets that helper — confirm intended names upstream.
    """
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]

        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = """""".join(camel_case_split(attr_name )[:-1] )

    all_models = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    all_models = list(all_models )
    all_models.sort()

    data = {"""model_type""": all_models}
    data["""pytorch"""] = [pt_models[t] for t in all_models]
    data["""tensorflow"""] = [tf_models[t] for t in all_models]
    data["""flax"""] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure each type has a processing class.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = """AutoProcessor"""
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = """AutoTokenizer"""
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = """AutoFeatureExtractor"""
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = """AutoTokenizer"""
    data["""processor"""] = [processors[t] for t in all_models]

    return pd.DataFrame(data )
def update_pipeline_and_auto_class_table(table):
    """
    Update the table of model class to (pipeline tag, auto class) without removing old keys if they don't exist
    anymore.

    Args:
        table: dict mapping model class name -> (pipeline_tag, auto_class); updated in place and returned.
    """
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    """
    Update the metadata for the Transformers repo in `huggingface/transformers-metadata`.

    Args:
        token: Hub token with write access to the `huggingface/transformers-metadata` dataset repo.
        commit_sha: the sha of the transformers commit that triggered the update (used in the commit message),
            or None for a generic "Update" message.
    """
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """
    Check that all pipeline tasks declared in `transformers.pipelines.SUPPORTED_TASKS` have an entry in the
    `PIPELINE_TAGS_AND_AUTO_MODELS` constant; raise a ValueError listing the missing ones.
    """
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            # The task may still be covered if its default PyTorch auto class is registered under another tag.
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    # CLI entry point: either only verify pipeline-tag coverage, or push refreshed metadata to the Hub.
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
| 262 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    """
    Helper that builds a tiny DeBERTa config plus random inputs and runs per-head-type shape checks.
    The name and the `create_and_check_*` / `prepare_config_and_inputs*` method names are relied upon by
    `DebertaModelTest` below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Build random ids/masks/labels plus a config for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        # Pipelines need a larger vocabulary than the tiny test default.
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        # The loss must be a scalar tensor.
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        # The model must accept the full, partial and minimal input signatures.
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(
            list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size]
        )

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape expected by `ModelTesterMixin`."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite wiring for the tiny DeBERTa models built by `DebertaModelTester`."""

    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # NOTE(review): flag names reconstructed from the standard ModelTesterMixin layout
    # (True, False, False, False, False) — confirm against the upstream test file.
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the pretrained `microsoft/deberta-base` checkpoint."""

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 160 | 0 |
'''simple docstring'''
def solution() -> int:
    """
    Project Euler 40 (Champernowne's constant): concatenate the positive integers
    into one long digit string 123456789101112... and return the product
    d1 * d10 * d100 * d1000 * d10000 * d100000 * d1000000 (1-indexed digits).

    Returns:
        int: the product of the seven selected digits (210).
    """
    chunks = []
    length = 0
    num = 1
    # Stop once at least one million digits have been accumulated, so every
    # index used below (up to 999999) is guaranteed to exist.
    while length < 1_000_000:
        piece = str(num)
        chunks.append(piece)
        length += len(piece)
        num += 1
    constant = "".join(chunks)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
| 603 | '''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__snake_case = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    """
    Base class for feature extractors that operate on sequences (e.g. raw audio): stores the padding
    configuration and implements batching, truncation and padding (`pad`) on top of `FeatureExtractionMixin`.

    Args:
        feature_size (`int`): The dimension of the extracted feature vectors.
        sampling_rate (`int`): The sampling rate at which the audio files should be digitized, in hertz.
        padding_value (`float`): The value used to fill padded positions.
    """

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs: Dict):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        # Which side sequences are padded on, and whether `pad` returns an attention
        # mask by default; both may be overridden via kwargs.
        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ):
        """
        Pad (and optionally truncate) a batch of extracted features to the same length and return them as a
        `BatchFeature`, optionally converted to numpy/PyTorch/TensorFlow tensors.
        """
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                # Keep feature tensors in float32 rather than float64.
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ):
        """Pad a single example (dict of arrays) up to `max_length` on `self.padding_side`."""
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        """Truncate a single example (dict of arrays) down to `max_length`."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        """Resolve the user-supplied `padding` argument into a `PaddingStrategy`, validating the config."""
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
| 603 | 1 |
import itertools
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in increasing order: 2, 3, 5, 7, ..."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the `nth` prime number (Project Euler 7; default: the 10001st)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
| 55 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Per-device batch-size cap before falling back to gradient accumulation, and
# the fixed batch size used for the evaluation dataloader.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a pair of `DataLoader`s for the glue/mrpc dataset, using "bert-base-cased" as the tokenizer.

    Args:
        accelerator (`Accelerator`): the `Accelerator` object driving training.
        batch_size (`int`, *optional*): batch size for the training DataLoader.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ = mocked_dataloaders # noqa: F811
def training_function(config, args):
    """Train and evaluate BERT-base on GLUE/MRPC, logging metrics to experiment trackers.

    Args:
        config: Hyper-parameter dict with keys "lr", "num_epochs", "seed", "batch_size".
        args: Parsed CLI namespace (cpu, mixed_precision, with_tracking, project_dir).
    """
    # For testing only: shrink the run when CI uses mocked dataloaders.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    """Parse CLI arguments and launch training with the default MRPC hyper-parameters."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    # Default hyper-parameters for the MRPC fine-tuning run.
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()
| 339 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 475 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
# Configure transformers' internal logging for this example script: INFO
# verbosity, the library's default stream handler, and its explicit formatter.
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Build tf.data datasets for text classification from CSV files.

    Loads the provided CSV split files with `datasets`, tokenizes the one or two
    text columns, and wraps each split in a `tf.data.Dataset` of
    (model-input dict, label id) pairs.

    Args:
        train_file / eval_file / test_file: Paths to the CSV splits (any may be None).
        tokenizer: Tokenizer producing the model's input tensors.
        label_column_id: Index of the label column among the CSV features.
        max_seq_length: Pad/truncate length; None uses the model maximum.

    Returns:
        (train_ds, val_ds, test_ds, label2id) — datasets are None for missing splits.
    """
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    # Pop the label column so only the text feature(s) remain.
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    # One remaining column -> single-sentence task; two -> sentence-pair task.
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        # Declare the known cardinality so downstream code can call len().
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
# Module-level logger; `main()` below logs through this name.
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    """Fine-tune a TF sequence-classification model on CSV data and optionally evaluate it.

    Returns:
        Dict of evaluation results (empty when --do_eval is not passed).
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        # Accuracy over the argmax class predictions.
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
# Script entry point: run the fine-tuning pipeline only when executed directly.
if __name__ == "__main__":
    main()
| 475 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for Longformer (byte-level BPE, shared with RoBERTa)."""

    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        # Write the tiny vocab/merges files the tokenizer classes load from disk.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        # Spot-check known ids against the pretrained-style encoding.
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 583 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
# Module-level logger via transformers' logging wrapper.
# NOTE(review): the binding name looks machine-mangled — this module presumably
# named it `logger`; confirm against the upstream pipeline module.
snake_case__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """
    Zero-shot audio classification pipeline: scores how well an audio clip matches
    each of a set of free-text candidate labels, using a model exposing
    `logits_per_audio` (CLAP-style audio/text contrastive model).

    The method names `_sanitize_parameters` / `preprocess` / `_forward` /
    `postprocess` are required by the `Pipeline` base-class contract.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''')
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        """Classify `audios` (ndarray, raw bytes, local path or http(s) URL) against `candidate_labels`."""
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # All user-facing kwargs are consumed by preprocess; forward/postprocess take none.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        """Load/decode the audio, extract features, and tokenize one hypothesis per label."""
        if isinstance(audio, str):
            if audio.startswith('''http://''') or audio.startswith('''https://'''):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, '''rb''') as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError('''We expect a numpy ndarray as input''')
        if len(audio.shape) != 1:
            raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''')

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='''pt''')
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop('''candidate_labels''')
        text_inputs = model_inputs.pop('''text_inputs''')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        """Softmax the per-label logits and return [{'score', 'label'}, ...] sorted by descending score."""
        candidate_labels = model_outputs.pop('''candidate_labels''')
        logits = model_outputs['''logits'''][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError('''`tf` framework not supported.''')

        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 583 | 1 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    """Convert `image` (tensor, PIL image, or list of either) into a normalized
    NCHW float tensor in [-1, 1], resizing PIL images to (w, h).

    A tensor input is returned unchanged (assumed already preprocessed).
    """
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        # HWC uint8 [0, 255] -> NCHW float [-1, 1]
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between `v0` and `v1` at fraction `t`.

    Accepts numpy arrays or torch tensors (torch inputs are round-tripped through
    numpy and moved back to their original device). Falls back to plain lerp when
    the inputs are nearly (anti-)parallel (|dot| > DOT_THRESHOLD).
    """
    # Fix: must default to False, otherwise pure-numpy inputs hit a NameError below.
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # Nearly colinear: lerp is numerically safer than dividing by sin(theta).
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
def spherical_dist_loss(x, y):
    """Squared great-circle (angular) distance between `x` and `y` after
    L2-normalizing each along the last dimension; reduces that dimension."""
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    # chord length -> half-angle -> squared angle (scaled): 2 * arcsin(|x-y|/2)^2
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    """Set `requires_grad` to `value` on every parameter of `model` (freeze/unfreeze)."""
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    """
    Stable-Diffusion pipeline that mixes a content image with a style image.

    Latents, prompt embeddings and CLIP image embeddings of the two images are
    spherically interpolated (`slerp`), and the denoising loop can additionally
    be steered by a CLIP spherical-distance guidance loss towards the mixed
    image embedding. An optional CoCa captioning model generates prompts when
    none are supplied.

    NOTE(review): names were reconstructed from an identifier-mangled copy using
    the surviving call sites — confirm against the upstream community pipeline.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        # feature_extractor.size is an int in older configs, a dict in newer ones.
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        # Text encoder and the CLIP scorer are never trained by this pipeline.
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Enable sliced attention in the UNet to trade speed for memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Restore full (unsliced) attention computation."""
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        """Return the tail of the scheduler's timesteps covering `strength` of the schedule."""
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        """VAE-encode `image`, scale, repeat to `batch_size`, and add scheduler noise at `timestep`."""
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        """Caption `image` with the CoCa model; strip the special tokens from the result."""
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        """Return L2-normalized CLIP image embeddings of `image`, repeated to `batch_size`."""
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip

    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        """Adjust the noise prediction with the gradient of a CLIP spherical-distance loss."""
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        """Generate `batch_size` images mixing `content_image` and `style_image`."""
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.")
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.")
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        # NOTE(review): Tensor.to is not in-place, so this statement has no effect as written.
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        # No safety checker in this pipeline, so nsfw detection is unknown.
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 556 |
"""simple docstring"""
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test for n < 3,317,044,064,679,887,385,961,981.

    Args:
        n: integer to test for primality.
        allow_probable: if True, allow testing beyond the deterministic bound;
            a True result then only indicates a *probable* prime.

    Returns:
        True if n is prime (or probably prime past the bound).

    Raises:
        ValueError: if n exceeds the deterministic bound and allow_probable is False.
    """
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime.")
    # array bounds provided by analysis: below bounds[i], the first i+1 primes
    # are sufficient witnesses (1 entries are placeholders with no new bound).
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
def test_miller_rabin() -> None:
    """Exercise miller_rabin around each deterministic witness bound:
    a composite just below and a prime just above each threshold."""
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
| 556 | 1 |
class EditDistance:
    """
    Levenshtein edit distance (unit-cost insert/delete/replace) between two
    strings, with both a memoized top-down and a tabulated bottom-up solver.
    """

    def __init__(self) -> None:
        self.word1 = ""
        self.word2 = ""
        self.dp = []  # memo table (top-down) / DP table (bottom-up)

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        # Base cases: one word exhausted -> cost is the remaining prefix length.
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        """Memoized recursive edit distance between word1 and word2."""
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        """Iterative tabulated edit distance between word1 and word2."""
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
lowerCAmelCase__ = EditDistance()
print("""****************** Testing Edit Distance DP Algorithm ******************""")
print()
lowerCAmelCase__ = input("""Enter the first string: """).strip()
lowerCAmelCase__ = input("""Enter the second string: """).strip()
print()
print(f"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}""")
print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}""")
print()
print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
| 514 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict", [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1_3_3_7, num_examples=4_2, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1_3_3_7, num_examples=4_2)}),
        SplitDict({"train": SplitInfo()}),
    ], )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    """Round-trip a SplitDict through its YAML-list representation."""
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")])
def test_split_dict_asdict_has_dataset_name(split_info):
    """asdict must always serialize dataset_name, even when it is None."""
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 514 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger following the library-wide convention.
logger = logging.get_logger(__name__)

# Map of canonical checkpoint names to their hosted config files.
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    """
    Configuration for a YOLOS object-detection model: the ViT-style encoder
    hyper-parameters plus detection-specific settings (detection tokens,
    Hungarian-matcher costs and loss coefficients).

    NOTE(review): defaults were recovered from an identifier-mangled copy —
    confirm against the released hustvl/yolos checkpoints.
    """

    model_type = 'yolos'

    def __init__(
        self,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[5_12, 8_64],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=1_00,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    """ONNX export configuration for YOLOS.

    Property names (`inputs`, `atol_for_validation`, `default_onnx_opset`) are
    part of the `OnnxConfig` contract consumed by the exporter.
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel_values input with fully dynamic axes.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ])

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when comparing ONNX vs PyTorch outputs.
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 78 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase:
    """Builder of tiny ALBERT configurations and random inputs for the model tests below.

    NOTE(review): this file looks machine-obfuscated.  Assignment targets were
    mangled to the throwaway name ``_UpperCamelCase`` (e.g. ``_UpperCamelCase =
    parent`` was presumably ``self.parent = parent``), so the attributes read
    later (``self.parent``, ``self.batch_size`` ...) are never actually set, and
    ``__init__`` repeats the parameter name ``__a`` which is a SyntaxError.
    Verify every method against the upstream ``AlbertModelTester`` before use.
    """

    def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=16 , __a=36 , __a=6 , __a=6 , __a=6 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> Optional[Any]:
        '''Store the harness handle and the (tiny) ALBERT hyper-parameters.'''
        # NOTE(review): every left-hand side below should presumably be a
        # ``self.<name>`` attribute — targets were lost in obfuscation.
        _UpperCamelCase = parent
        _UpperCamelCase = batch_size
        _UpperCamelCase = seq_length
        _UpperCamelCase = is_training
        _UpperCamelCase = use_input_mask
        _UpperCamelCase = use_token_type_ids
        _UpperCamelCase = use_labels
        _UpperCamelCase = vocab_size
        _UpperCamelCase = embedding_size
        _UpperCamelCase = hidden_size
        _UpperCamelCase = num_hidden_layers
        _UpperCamelCase = num_hidden_groups
        _UpperCamelCase = num_attention_heads
        _UpperCamelCase = intermediate_size
        _UpperCamelCase = hidden_act
        _UpperCamelCase = hidden_dropout_prob
        _UpperCamelCase = attention_probs_dropout_prob
        _UpperCamelCase = max_position_embeddings
        _UpperCamelCase = type_vocab_size
        _UpperCamelCase = type_sequence_label_size
        _UpperCamelCase = initializer_range
        _UpperCamelCase = num_labels
        _UpperCamelCase = num_choices
        _UpperCamelCase = scope

    def UpperCAmelCase ( self) -> Optional[int]:
        '''Build random input ids / masks / labels plus a config for one test run.'''
        _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        _UpperCamelCase = None
        if self.use_input_mask:
            _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
        _UpperCamelCase = None
        if self.use_token_type_ids:
            _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        _UpperCamelCase = None
        _UpperCamelCase = None
        _UpperCamelCase = None
        if self.use_labels:
            _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            _UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
        _UpperCamelCase = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def UpperCAmelCase ( self) -> Dict:
        '''Return an AlbertConfig built from the stored hyper-parameters.'''
        return AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )

    def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[int]:
        '''Check AlbertModel output shapes (last_hidden_state and pooler_output).'''
        _UpperCamelCase = AlbertModel(config=__a)
        model.to(__a)
        model.eval()
        _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
        _UpperCamelCase = model(__a , token_type_ids=__a)
        _UpperCamelCase = model(__a)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))

    def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[Any]:
        '''Check AlbertForPreTraining (MLM + sentence-order) logit shapes.'''
        _UpperCamelCase = AlbertForPreTraining(config=__a)
        model.to(__a)
        model.eval()
        _UpperCamelCase = model(
            __a , attention_mask=__a , token_type_ids=__a , labels=__a , sentence_order_label=__a , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels))

    def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[int]:
        '''Check AlbertForMaskedLM logit shape.'''
        _UpperCamelCase = AlbertForMaskedLM(config=__a)
        model.to(__a)
        model.eval()
        _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))

    def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Any:
        '''Check AlbertForQuestionAnswering start/end logit shapes.'''
        _UpperCamelCase = AlbertForQuestionAnswering(config=__a)
        model.to(__a)
        model.eval()
        _UpperCamelCase = model(
            __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))

    def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[Any]:
        '''Check AlbertForSequenceClassification logit shape.'''
        _UpperCamelCase = self.num_labels
        _UpperCamelCase = AlbertForSequenceClassification(__a)
        model.to(__a)
        model.eval()
        _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[str]:
        '''Check AlbertForTokenClassification logit shape.'''
        _UpperCamelCase = self.num_labels
        _UpperCamelCase = AlbertForTokenClassification(config=__a)
        model.to(__a)
        model.eval()
        _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))

    def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[Any]:
        '''Check AlbertForMultipleChoice logit shape (inputs tiled per choice).'''
        _UpperCamelCase = self.num_choices
        _UpperCamelCase = AlbertForMultipleChoice(config=__a)
        model.to(__a)
        model.eval()
        # Tile each input along a new "choice" dimension.
        _UpperCamelCase = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        _UpperCamelCase = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        _UpperCamelCase = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        _UpperCamelCase = model(
            __a , attention_mask=__a , token_type_ids=__a , labels=__a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))

    def UpperCAmelCase ( self) -> Optional[Any]:
        '''Repackage prepare_config_and_inputs() output as (config, inputs_dict).'''
        _UpperCamelCase = self.prepare_config_and_inputs()
        (
            (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) ,
        ) = config_and_inputs
        _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
    """Common-suite tests for all ALBERT heads.

    NOTE(review): obfuscation damage — the two ``lowerCamelCase`` base classes
    are undefined (presumably ModelTesterMixin / PipelineTesterMixin), the
    three class attributes are all bound to the same name ``lowercase__``
    (the last assignment wins), ``AlbertModelTester`` is not defined in this
    file, and several methods reference an undefined ``__a``.  Verify against
    the upstream AlbertModelTest before relying on this class.
    """

    # Presumably all_model_classes / pipeline_model_mapping / fx_compatible —
    # the original attribute names were lost.
    lowercase__ = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    lowercase__ = (
        {
            'feature-extraction': AlbertModel,
            'fill-mask': AlbertForMaskedLM,
            'question-answering': AlbertForQuestionAnswering,
            'text-classification': AlbertForSequenceClassification,
            'token-classification': AlbertForTokenClassification,
            'zero-shot': AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowercase__ = True

    def UpperCAmelCase ( self , __a , __a , __a=False) -> List[str]:
        '''Add dummy label tensors for pretraining-style model classes.'''
        _UpperCamelCase = super()._prepare_for_class(__a , __a , return_labels=__a)
        if return_labels:
            if model_class in get_values(__a):
                _UpperCamelCase = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__a)
                _UpperCamelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__a)
        return inputs_dict

    def UpperCAmelCase ( self) -> List[Any]:
        '''Create the model tester and the config tester.'''
        _UpperCamelCase = AlbertModelTester(self)
        _UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37)

    def UpperCAmelCase ( self) -> Optional[int]:
        '''Run the shared AlbertConfig sanity tests.'''
        self.config_tester.run_common_tests()

    def UpperCAmelCase ( self) -> Optional[int]:
        '''Exercise the base AlbertModel.'''
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a)

    def UpperCAmelCase ( self) -> Optional[int]:
        '''Exercise AlbertForPreTraining.'''
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*__a)

    def UpperCAmelCase ( self) -> Tuple:
        '''Exercise AlbertForMaskedLM.'''
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__a)

    def UpperCAmelCase ( self) -> List[Any]:
        '''Exercise AlbertForMultipleChoice.'''
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*__a)

    def UpperCAmelCase ( self) -> int:
        '''Exercise AlbertForQuestionAnswering.'''
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__a)

    def UpperCAmelCase ( self) -> Any:
        '''Exercise AlbertForSequenceClassification.'''
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__a)

    def UpperCAmelCase ( self) -> Union[str, Any]:
        '''Exercise the base model with each position-embedding type.'''
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _UpperCamelCase = type
            self.model_tester.create_and_check_model(*__a)

    @slow
    def UpperCAmelCase ( self) -> Optional[Any]:
        '''Smoke-test loading the first published ALBERT checkpoint.'''
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCamelCase = AlbertModel.from_pretrained(__a)
            self.assertIsNotNone(__a)
@require_torch
class _UpperCAmelCase( unittest.TestCase ):
    """Integration test: run ``albert-base-v2`` on a fixed sentence and compare
    a 3x3 slice of the hidden states against known-good values.

    NOTE(review): assignment targets are mangled (``_UpperCamelCase``) and the
    ``__a`` references are undefined — verify against the upstream test.
    """

    @slow
    def UpperCAmelCase ( self) -> Optional[int]:
        '''Forward a hard-coded token sequence and check shape + values.'''
        _UpperCamelCase = AlbertModel.from_pretrained('''albert-base-v2''')
        _UpperCamelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
        _UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            _UpperCamelCase = model(__a , attention_mask=__a)[0]
        _UpperCamelCase = torch.Size((1, 11, 7_68))
        self.assertEqual(output.shape , __a)
        # Reference values for a small slice of the last hidden state.
        _UpperCamelCase = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4))
| 78 | 1 |
"""Lazy import structure for the XLM model, following the standard
``transformers`` ``__init__`` pattern: heavy submodules are only imported on
first attribute access at runtime, while type checkers see real imports.

Fixes over the previous version: ``_import_structure`` was referenced but
never defined (NameError on import), the torch/TF class lists were bound to a
throwaway name instead of being registered, and the ``sys.modules`` swap that
installs the lazy proxy was missing.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Config and tokenizer are lightweight and always importable.
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

# PyTorch models are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

# TensorFlow models likewise.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    # Static type checkers / IDEs get real imports.
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )
else:
    # At runtime, replace this module with a lazy proxy that imports the
    # submodules above only when their attributes are first accessed.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
# Emit INFO-level progress messages during conversion; scoped logger for this script.
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    """Copy HiFi-GAN generator weights from the original checkpoint dict into a
    ``SpeechTaHifiGan`` model in place.

    Fixes over the previous version: the function name is restored to
    ``load_weights`` (which the converter below actually calls), and every
    assignment target — previously mangled to a throwaway local, so no weight
    was ever copied — is restored to the corresponding model parameter.

    Args:
        checkpoint: the original generator state dict (``model.generator``).
        hf_model: the freshly constructed ``SpeechTaHifiGan`` instance.
        config: its ``SpeechTaHifiGanConfig`` (provides layer counts).
    """
    # Weight norm must be attached so weight_g / weight_v parameters exist.
    hf_model.apply_weight_norm()

    # NOTE(review): attribute paths (conv_pre / upsampler / resblocks /
    # conv_post) follow the transformers SpeechT5HifiGan layout — confirm
    # against the installed transformers version.
    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    # One residual block per (upsample layer, kernel size) pair.
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    # Fold weight norm back into plain weights for inference.
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original SpeechT5 HiFi-GAN vocoder checkpoint into the
    transformers ``SpeechTaHifiGan`` format and save (optionally push) it.

    Fixes over the previous version: the function name is restored to
    ``convert_hifigan_checkpoint`` (the name invoked by the CLI entry point),
    and the mean/scale normalization stats — previously assigned to a
    throwaway local and dropped — are attached to the model again.

    Args:
        checkpoint_path: path to the original checkpoint (``torch.load``-able).
        stats_path: path to the ``stats.npy`` file holding [mean, scale].
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
        config_path: optional HF config.json to use instead of defaults.
        repo_id: optional Hub repo to push the converted model to.
    """
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    # stats.npy holds the spectrogram normalization statistics: row 0 = mean,
    # row 1 = scale — TODO confirm against the original training pipeline.
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point: parse paths and run the conversion.
    # NOTE(review): obfuscation damage — the parser is bound to
    # ``__lowerCAmelCase`` but used as ``parser``, the parsed namespace is
    # likewise bound to ``__lowerCAmelCase`` but used as ``args``, and
    # ``convert_hifigan_checkpoint`` must exist at module level.  Verify names.
    __lowerCAmelCase = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    __lowerCAmelCase = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 536 | 0 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class _SCREAMING_SNAKE_CASE (tf.keras.optimizers.schedules.LearningRateSchedule ):
    """Learning-rate schedule that applies a polynomial warmup before handing
    off to a wrapped decay schedule.

    NOTE(review): obfuscation damage — ``__init__`` and ``__call__`` repeat
    the parameter name ``UpperCamelCase`` (duplicate parameter names are a
    SyntaxError), and the stored values (``initial_learning_rate`` etc.)
    reference names that no longer exist.  Verify against
    ``transformers.optimization_tf.WarmUp``.
    """

    def __init__( self : Union[str, Any] , UpperCamelCase : float , UpperCamelCase : Callable , UpperCamelCase : int , UpperCamelCase : float = 1.0 , UpperCamelCase : str = None , )->Optional[int]:
        super().__init__()
        __SCREAMING_SNAKE_CASE : Optional[Any] = initial_learning_rate
        __SCREAMING_SNAKE_CASE : Optional[Any] = warmup_steps
        __SCREAMING_SNAKE_CASE : Optional[int] = power
        __SCREAMING_SNAKE_CASE : List[Any] = decay_schedule_fn
        __SCREAMING_SNAKE_CASE : int = name

    def __call__( self : List[Any] , UpperCamelCase : Any )->Optional[int]:
        with tf.name_scope(self.name or "WarmUp" ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            __SCREAMING_SNAKE_CASE : Optional[Any] = tf.cast(UpperCamelCase , tf.floataa )
            __SCREAMING_SNAKE_CASE : Optional[int] = tf.cast(self.warmup_steps , tf.floataa )
            __SCREAMING_SNAKE_CASE : Any = global_step_float / warmup_steps_float
            __SCREAMING_SNAKE_CASE : Tuple = self.initial_learning_rate * tf.math.pow(UpperCamelCase , self.power )
            # Before warmup ends use the warmup LR, afterwards defer to the
            # wrapped decay schedule (shifted by the warmup length).
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=UpperCamelCase , )

    def __snake_case ( self : str )->List[str]:
        # Keras serialization hook.
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def _lowerCAmelCase ( __lowerCamelCase : float , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : float = 0.0 , __lowerCamelCase : float = 0.9 , __lowerCamelCase : float = 0.999 , __lowerCamelCase : float = 1E-8 , __lowerCamelCase : Optional[float] = None , __lowerCamelCase : Optional[float] = None , __lowerCamelCase : float = 0.0 , __lowerCamelCase : float = 1.0 , __lowerCamelCase : Optional[List[str]] = None , ):
    """Build an (optimizer, lr_schedule) pair: polynomial decay, optional
    warmup, and AdamWeightDecay when a weight-decay rate is given.

    NOTE(review): obfuscation damage — the signature repeats the parameter
    name ``__lowerCamelCase`` and the AdamWeightDecay/Adam calls pass the
    keyword ``beta_a`` twice; both are SyntaxErrors.  Verify against
    ``transformers.optimization_tf.create_optimizer``.
    """
    # Polynomial decay from init_lr down to init_lr * min_lr_ratio.
    __SCREAMING_SNAKE_CASE : Tuple = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=__lowerCamelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__lowerCamelCase , )
    if num_warmup_steps:
        # Wrap the decay schedule with a linear/polynomial warmup phase.
        __SCREAMING_SNAKE_CASE : List[str] = WarmUp(
            initial_learning_rate=__lowerCamelCase , decay_schedule_fn=__lowerCamelCase , warmup_steps=__lowerCamelCase , )
    if weight_decay_rate > 0.0:
        __SCREAMING_SNAKE_CASE : int = AdamWeightDecay(
            learning_rate=__lowerCamelCase , weight_decay_rate=__lowerCamelCase , beta_a=__lowerCamelCase , beta_a=__lowerCamelCase , epsilon=__lowerCamelCase , clipnorm=__lowerCamelCase , global_clipnorm=__lowerCamelCase , exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"] , include_in_weight_decay=__lowerCamelCase , )
    else:
        __SCREAMING_SNAKE_CASE : Any = tf.keras.optimizers.Adam(
            learning_rate=__lowerCamelCase , beta_a=__lowerCamelCase , beta_a=__lowerCamelCase , epsilon=__lowerCamelCase , clipnorm=__lowerCamelCase , global_clipnorm=__lowerCamelCase , )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class _SCREAMING_SNAKE_CASE (UpperCamelCase ):
    """Adam with decoupled weight decay (AdamW): the decay is applied directly
    to the variables rather than folded into the gradients, with include /
    exclude name patterns controlling which variables decay.

    NOTE(review): obfuscation damage — the base class ``UpperCamelCase`` is
    undefined (presumably the legacy keras ``Adam``), every method repeats the
    parameter name ``UpperCamelCase`` (SyntaxError), and several locals are
    bound to ``__SCREAMING_SNAKE_CASE`` but read under their original names.
    Verify against ``transformers.optimization_tf.AdamWeightDecay``.
    """

    def __init__( self : Optional[Any] , UpperCamelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.0_0_1 , UpperCamelCase : float = 0.9 , UpperCamelCase : float = 0.9_9_9 , UpperCamelCase : float = 1E-7 , UpperCamelCase : bool = False , UpperCamelCase : float = 0.0 , UpperCamelCase : Optional[List[str]] = None , UpperCamelCase : Optional[List[str]] = None , UpperCamelCase : str = "AdamWeightDecay" , **UpperCamelCase : int , )->List[Any]:
        super().__init__(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase )
        __SCREAMING_SNAKE_CASE : Union[str, Any] = weight_decay_rate
        __SCREAMING_SNAKE_CASE : Optional[Any] = include_in_weight_decay
        __SCREAMING_SNAKE_CASE : Any = exclude_from_weight_decay

    @classmethod
    def __snake_case ( cls : Any , UpperCamelCase : Optional[int] )->List[Any]:
        # Re-register WarmUp so configs containing it can be deserialized.
        __SCREAMING_SNAKE_CASE : List[str] = {"WarmUp": WarmUp}
        return super(UpperCamelCase , cls ).from_config(UpperCamelCase , custom_objects=UpperCamelCase )

    def __snake_case ( self : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : int )->int:
        # Cache the decay rate as a constant tensor per (device, dtype).
        super(UpperCamelCase , self )._prepare_local(UpperCamelCase , UpperCamelCase , UpperCamelCase )
        __SCREAMING_SNAKE_CASE : Optional[Any] = tf.constant(
            self.weight_decay_rate , name="adam_weight_decay_rate" )

    def __snake_case ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : Tuple )->Any:
        # Apply decoupled weight decay to the variable when its name matches.
        __SCREAMING_SNAKE_CASE : List[str] = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] , use_locking=self._use_locking , )
        return tf.no_op()

    def __snake_case ( self : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int]=None , **UpperCamelCase : str )->List[Any]:
        # Split (grad, var) pairs and defer to the base implementation.
        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = list(zip(*UpperCamelCase ) )
        return super(UpperCamelCase , self ).apply_gradients(zip(UpperCamelCase , UpperCamelCase ) , name=UpperCamelCase , **UpperCamelCase )

    def __snake_case ( self : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Any )->Optional[Any]:
        # Look up (or lazily build) the per-(device, dtype) coefficient cache.
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        __SCREAMING_SNAKE_CASE : Any = apply_state or {}
        __SCREAMING_SNAKE_CASE : Optional[Any] = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            __SCREAMING_SNAKE_CASE : Any = self._fallback_apply_state(UpperCamelCase , UpperCamelCase )
            __SCREAMING_SNAKE_CASE : List[str] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def __snake_case ( self : str , UpperCamelCase : Tuple , UpperCamelCase : List[str] , UpperCamelCase : int=None )->List[Any]:
        # Dense update: decay the variable first, then run the Adam step.
        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase )
        __SCREAMING_SNAKE_CASE : str = self._decay_weights_op(UpperCamelCase , UpperCamelCase , UpperCamelCase )
        with tf.control_dependencies([decay] ):
            return super(UpperCamelCase , self )._resource_apply_dense(UpperCamelCase , UpperCamelCase , **UpperCamelCase )

    def __snake_case ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str]=None )->List[Any]:
        # Sparse update: same ordering as the dense path.
        __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase )
        __SCREAMING_SNAKE_CASE : List[str] = self._decay_weights_op(UpperCamelCase , UpperCamelCase , UpperCamelCase )
        with tf.control_dependencies([decay] ):
            return super(UpperCamelCase , self )._resource_apply_sparse(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase )

    def __snake_case ( self : Tuple )->Any:
        # Extend the base serialized config with the decay rate.
        __SCREAMING_SNAKE_CASE : Optional[Any] = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate} )
        return config

    def __snake_case ( self : Dict , UpperCamelCase : Optional[Any] )->Tuple:
        # Decide, from include/exclude regex lists, whether a variable decays.
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(UpperCamelCase , UpperCamelCase ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(UpperCamelCase , UpperCamelCase ) is not None:
                    return False
        return True
class _SCREAMING_SNAKE_CASE (UpperCamelCase ):
    """Accumulates gradients across multiple mini-batches (replica-local
    variables) so an optimizer step can be applied every N batches.

    NOTE(review): obfuscation damage — the base class ``UpperCamelCase`` is
    undefined, several locals are bound to ``__SCREAMING_SNAKE_CASE`` but read
    under other names (e.g. the step variable), and the final
    ``tf.zeros_like(UpperCamelCase)`` presumably should reference
    ``gradient``.  Verify against
    ``transformers.optimization_tf.GradientAccumulator``.
    """

    def __init__( self : List[Any] )->Tuple:
        # Accumulator variables are created lazily on the first __call__.
        __SCREAMING_SNAKE_CASE : List[str] = []
        __SCREAMING_SNAKE_CASE : Dict = None

    @property
    def __snake_case ( self : str )->str:
        # Lazily create and return the accumulation-step counter.
        if self._accum_steps is None:
            __SCREAMING_SNAKE_CASE : Dict = tf.Variable(
                tf.constant(0 , dtype=tf.intaa ) , trainable=UpperCamelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
        return self._accum_steps.value()

    @property
    def __snake_case ( self : Any )->int:
        # Current accumulated gradient values.
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients" )
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__( self : str , UpperCamelCase : int )->Optional[Any]:
        # Add one batch worth of gradients into the accumulators.
        if not self._gradients:
            __SCREAMING_SNAKE_CASE : Optional[Any] = self.step # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(UpperCamelCase ) , trainable=UpperCamelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(UpperCamelCase ) != len(self._gradients ):
            raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(UpperCamelCase )}""" )
        for accum_gradient, gradient in zip(self._gradients , UpperCamelCase ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(UpperCamelCase )
        self._accum_steps.assign_add(1 )

    def __snake_case ( self : Any )->Any:
        # Reset the step counter and zero all accumulators.
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(UpperCamelCase ) )
| 447 |
from math import sqrt


def solution(limit: int = 1000000) -> int:
    """Project Euler #86: return the least cuboid size M such that the number
    of axis-aligned cuboids with integer sides a <= b <= c <= M whose shortest
    corner-to-corner path over the surface is an integer first exceeds
    ``limit``.

    The shortest surface path of an a x b x c cuboid (a <= b <= c) has length
    sqrt((a + b)**2 + c**2), so for each candidate size c and each
    s = a + b in [2, 2c] with an integral path we count the valid (a, b)
    splits directly.

    Fixes over the previous version: the function is named ``solution`` (the
    name the ``__main__`` guard calls), the mangled local/parameter names that
    made the body raise NameError are restored, and the integrality test uses
    ``math.isqrt`` instead of a float ``sqrt(...).is_integer()``.

    >>> solution(2000)
    100
    """
    from math import isqrt  # exact integer square root, no float rounding

    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            path_sq = sum_shortest_sides**2 + max_cuboid_size**2
            if isqrt(path_sq) ** 2 == path_sq:  # shortest path is an integer
                # Count splits s = a + b with a <= b <= max_cuboid_size.
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
    # Prints the answer for the default one-million-cuboid limit.
    print(f'''{solution() = }''')
| 447 | 1 |
def A__(x_points: list, y_points: list, xa: float) -> list:
    """Evaluate, via Neville's iterated interpolation, the polynomial through
    the points ``(x_points[i], y_points[i])`` at the abscissa ``xa``
    (upstream name: ``neville_interpolate``).

    Fixes over the previous version: the signature repeated one mangled
    parameter name three times (a SyntaxError), and the tableau assignments
    lost their subscript targets (``q[i][1] = ...`` / ``q[j][i] = ...``), so
    the function always returned zeros.

    Args:
        x_points: distinct x coordinates of the interpolation nodes.
        y_points: corresponding y values (same length as ``x_points``).
        xa: the point at which to evaluate the interpolating polynomial.

    Returns:
        ``[value, table]`` — the interpolated value at ``xa`` and the full
        Neville tableau used to compute it.

    >>> A__([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
    10.0
    """
    n = len(x_points)
    q = [[0] * n for _ in range(n)]
    # Base column: each node's own y value is a degree-0 interpolant.
    for i in range(n):
        q[i][1] = y_points[i]
    # Each later column combines two lower-order interpolants.
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
    import doctest

    # Run any doctests embedded in this module's docstrings.
    doctest.testmod()
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the shared SentencePiece fixture used by the tokenizer tests below.
__A = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''XLMProphetNet tokenizer tests (SentencePiece fixture + slow big-model
    integration checks).

    NOTE(review): obfuscation damage — the mixin base ``__SCREAMING_SNAKE_CASE``
    is undefined (presumably TokenizerTesterMixin), the three class attributes
    all rebind the same name ``lowercase_`` (last one wins), and the
    ``UpperCAmelCase_`` references throughout the methods are undefined (they
    presumably were the fixture path / expected values / booleans).  Verify
    against the upstream XLMProphetNetTokenizationTest.
    '''
    lowercase_ = XLMProphetNetTokenizer
    lowercase_ = False
    lowercase_ = True

    def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[Any]:
        '''Build a tokenizer from the SentencePiece fixture and save it.'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCamelCase__: Any =XLMProphetNetTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_)
        tokenizer.save_pretrained(self.tmpdirname)

    def SCREAMING_SNAKE_CASE_ (self : str) ->str:
        '''Token <-> id round trip for the PAD token.'''
        lowerCamelCase__: List[Any] ="[PAD]"
        lowerCamelCase__: Tuple =0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_) , UpperCAmelCase_)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_) , UpperCAmelCase_)

    def SCREAMING_SNAKE_CASE_ (self : Dict) ->int:
        '''Check vocab ordering boundaries and total size.'''
        lowerCamelCase__: List[Any] =list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , "[PAD]")
        self.assertEqual(vocab_keys[1] , "[CLS]")
        self.assertEqual(vocab_keys[-1] , "j")
        self.assertEqual(len(UpperCAmelCase_) , 1_012)

    def SCREAMING_SNAKE_CASE_ (self : Dict) ->Union[str, Any]:
        '''Check the reported vocab size.'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1_012)

    def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]:
        '''Full tokenize / ids / back-to-tokens round trip on sample text.'''
        lowerCamelCase__: Optional[Any] =XLMProphetNetTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_)
        lowerCamelCase__: Tuple =tokenizer.tokenize("This is a test")
        self.assertListEqual(UpperCAmelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        lowerCamelCase__: Optional[Any] =tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            UpperCAmelCase_ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        lowerCamelCase__: Any =tokenizer.convert_tokens_to_ids(UpperCAmelCase_)
        self.assertListEqual(
            UpperCAmelCase_ , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ] , )
        lowerCamelCase__: Any =tokenizer.convert_ids_to_tokens(UpperCAmelCase_)
        self.assertListEqual(
            UpperCAmelCase_ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ] , )

    @cached_property
    def SCREAMING_SNAKE_CASE_ (self : Any) ->int:
        '''Download and cache the full pretrained tokenizer for slow tests.'''
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[str]:
        '''Check encoding of a short string with the pretrained tokenizer.'''
        lowerCamelCase__: Optional[int] ="Hello World!"
        lowerCamelCase__: Dict =[35_389, 6_672, 49, 2]
        self.assertListEqual(UpperCAmelCase_ , self.big_tokenizer.encode(UpperCAmelCase_))

    @slow
    def SCREAMING_SNAKE_CASE_ (self : int) ->Union[str, Any]:
        '''Full-batch integration check against recorded input_ids/attention_mask.'''
        # fmt: off
        lowerCamelCase__: Any ={"input_ids": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 59 | 0 |
"""simple docstring"""
from manim import *
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    # Manim scene that animates model weights moving between CPU, GPU and disk
    # during checkpoint loading.
    # NOTE(review): an automated rewrite collapsed every local binding to `__a`,
    # so names referenced later (`cpu_left_col_base`, `model_cpu_arr`, `ckpt_arr`,
    # `ckpt_cpu_arr`, `disk_left_col_base`, `animations`, `mem`, `meta_mem`, ...)
    # are never bound — rendering this scene as written raises NameError.
    # Restore the original variable names before use.
    def lowerCAmelCase__(self ):
        '''Build the CPU/GPU/model/disk layout and play the loading animation.'''
        # Basic cells: a full-size memory square, a quarter-size "meta" square,
        # and a slightly smaller stroke-less square used as a colour fill.
        __a : List[str] = Rectangle(height=0.5 , width=0.5 )
        __a : Union[str, Any] = Rectangle(height=0.25 , width=0.25 )
        __a : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU block: two stacked columns of six cells plus a label.
        __a : Dict = [mem.copy() for i in range(6 )]
        __a : str = [mem.copy() for i in range(6 )]
        __a : Tuple = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[Any] = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
        __a : Union[str, Any] = Text("""CPU""" , font_size=24 )
        __a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(_lowercase )
        # GPU block: a single row of four cells plus a label.
        __a : Optional[Any] = [mem.copy() for i in range(4 )]
        __a : Dict = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[str] = Text("""GPU""" , font_size=24 )
        __a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(_lowercase )
        # Model block: a row of six cells plus a label.
        __a : List[Any] = [mem.copy() for i in range(6 )]
        __a : Any = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Optional[Any] = Text("""Model""" , font_size=24 )
        __a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(_lowercase )
        # Fill rectangles placed over the CPU cells to mark where each model
        # shard lives while on CPU.
        __a : Tuple = []
        __a : Tuple = []
        __a : Optional[int] = []
        for i, rect in enumerate(_lowercase ):
            rect.set_stroke(_lowercase )
            __a : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.7 )
            if i == 0:
                # First target anchors to the CPU column corner; the rest chain
                # off the previously placed target.
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=_lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=_lowercase , buff=0.0 )
            self.add(_lowercase )
            model_cpu_arr.append(_lowercase )
        self.add(*_lowercase , *_lowercase , *_lowercase )
        # Checkpoint block: a row of six cells plus a label.
        __a : Optional[Any] = [mem.copy() for i in range(6 )]
        __a : Union[str, Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Any = Text("""Loaded Checkpoint""" , font_size=24 )
        __a : str = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(_lowercase )
        # Fills for the checkpoint cells; their CPU-side copies map onto the
        # left column first, then the right column.
        __a : Dict = []
        __a : int = []
        for i, rect in enumerate(_lowercase ):
            __a : List[str] = fill.copy().set_fill(_lowercase , opacity=0.7 )
            target.move_to(_lowercase )
            ckpt_arr.append(_lowercase )
            __a : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(_lowercase )
        self.add(*_lowercase , *_lowercase )
        # Legend explaining the colour coding.
        __a : List[str] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        __a : List[Any] = MarkupText(
            F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(_lowercase , _lowercase )
        __a : str = MarkupText(
            F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
        blue_text.next_to(_lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(_lowercase )
        # Step 1: explain disk offload, build the disk block, and move the
        # checkpoint fills onto it (scaled down).
        __a : Optional[int] = MarkupText(
            F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        __a : List[Any] = [meta_mem.copy() for i in range(6 )]
        __a : Optional[int] = [meta_mem.copy() for i in range(6 )]
        __a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[str] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Tuple = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
        __a : Dict = Text("""Disk""" , font_size=24 )
        __a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(_lowercase , run_time=3 ) , Write(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) )
        __a : Optional[Any] = []
        for i, rect in enumerate(_lowercase ):
            __a : List[str] = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(_lowercase , run_time=1.5 ) )
        self.play(*_lowercase )
        self.play(FadeOut(_lowercase ) )
        # Step 2: narrate garbage collection and fade everything out.
        __a : List[str] = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(_lowercase , run_time=3 ) )
        self.play(
            FadeOut(_lowercase , _lowercase , *_lowercase , *_lowercase ) , )
        self.wait()
| 63 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class SCREAMING_SNAKE_CASE__ :
    # Helper that builds small EfficientFormer configs and inputs for the TF
    # model tests below.
    # NOTE(review): local bindings were collapsed to `__a` by an automated
    # rewrite, so the attribute assignments in __init__ and the tuple unpacks
    # below no longer bind the names they later read (e.g. `self.batch_size`,
    # `config_and_inputs`) — restore the originals before running.
    def __init__(self , _lowercase , _lowercase = 13 , _lowercase = 64 , _lowercase = 2 , _lowercase = 3 , _lowercase = 3 , _lowercase = True , _lowercase = True , _lowercase = 128 , _lowercase=[16, 32, 64, 128] , _lowercase = 7 , _lowercase = 4 , _lowercase = 37 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 10 , _lowercase = 0.02 , _lowercase = 2 , _lowercase = 1 , _lowercase = 128 , _lowercase = [2, 2, 2, 2] , _lowercase = 2 , _lowercase = 2 , ):
        '''Store every hyper-parameter used to build configs/inputs.'''
        __a : str = parent
        __a : List[Any] = batch_size
        __a : int = image_size
        __a : Tuple = patch_size
        __a : str = num_channels
        __a : Union[str, Any] = is_training
        __a : List[Any] = use_labels
        __a : int = hidden_size
        __a : Optional[Any] = num_hidden_layers
        __a : List[Any] = num_attention_heads
        __a : Dict = intermediate_size
        __a : str = hidden_act
        __a : Dict = hidden_dropout_prob
        __a : str = attention_probs_dropout_prob
        __a : Optional[int] = type_sequence_label_size
        __a : Dict = initializer_range
        __a : Dict = encoder_stride
        __a : int = num_attention_outputs
        __a : List[Any] = embed_dim
        # sequence length is the embedding dim plus the class token
        __a : Optional[Any] = embed_dim + 1
        __a : Optional[Any] = resolution
        __a : Optional[Any] = depths
        __a : Union[str, Any] = hidden_sizes
        __a : List[str] = dim
        __a : Any = mlp_expansion_ratio

    def lowerCAmelCase__(self ):
        '''Return (config, pixel_values, labels); labels only when use_labels.'''
        __a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __a : str = None
        if self.use_labels:
            __a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __a : List[str] = self.get_config()
        return config, pixel_values, labels

    def lowerCAmelCase__(self ):
        '''Build an EfficientFormerConfig from the stored hyper-parameters.'''
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        '''Check the base model's last_hidden_state shape.'''
        __a : Optional[Any] = TFEfficientFormerModel(config=_lowercase )
        __a : List[Any] = model(_lowercase , training=_lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        '''Check classification logits for RGB and greyscale inputs.'''
        __a : Optional[Any] = self.type_sequence_label_size
        __a : Any = TFEfficientFormerForImageClassification(_lowercase )
        __a : Union[str, Any] = model(_lowercase , labels=_lowercase , training=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __a : Optional[Any] = 1
        __a : int = TFEfficientFormerForImageClassification(_lowercase )
        __a : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __a : str = model(_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def lowerCAmelCase__(self ):
        '''Return (config, inputs_dict) for the common-test mixins.'''
        __a : Any = self.prepare_config_and_inputs()
        __a , __a , __a : Tuple = config_and_inputs
        __a : Tuple = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case , unittest.TestCase ):
_lowerCAmelCase = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_lowerCAmelCase = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = TFEfficientFormerModelTester(self )
__a : Any = ConfigTester(
self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def lowerCAmelCase__(self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
pass
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(_lowercase )
__a : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[Any] = [*signature.parameters.keys()]
__a : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
__a : Tuple = model_class(_lowercase )
__a : int = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a : str = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__a : Any = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__a : int = seq_length * self.model_tester.chunk_length
else:
__a : Any = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__a : Optional[int] = outputs.decoder_hidden_states
self.asseretIsInstance(_lowercase , (list, tuple) )
self.assertEqual(len(_lowercase ) , _lowercase )
__a : Any = getattr(self.model_tester , """seq_length""" , _lowercase )
__a : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , _lowercase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : int = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase=False ):
'''simple docstring'''
__a : Any = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def lowerCAmelCase__(self ):
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Union[str, Any] = TFEfficientFormerModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : int = True
__a : Optional[int] = getattr(self.model_tester , """seq_length""" , _lowercase )
__a : Dict = getattr(self.model_tester , """encoder_seq_length""" , _lowercase )
__a : Dict = getattr(self.model_tester , """key_length""" , _lowercase )
__a : int = getattr(self.model_tester , """chunk_length""" , _lowercase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__a : List[str] = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__a : List[Any] = True
__a : Tuple = False
__a : List[Any] = True
__a : int = model_class(_lowercase )
__a : List[Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a : Optional[Any] = True
__a : List[str] = model_class(_lowercase )
__a : Dict = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__a : Dict = model_class(_lowercase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__a : Optional[Any] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowercase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__a : Optional[Any] = model(_lowercase )
self.assertTrue(outputs_dict is not None )
def __magic_name__ ( ):
    """Load and return the fixture COCO test image used by the integration tests.

    FIX(review): the local was renamed to ``__a`` by an automated rewrite, so the
    ``return image`` line referenced an unbound name and raised NameError.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    # Slow integration tests: run the published EfficientFormer-L1 checkpoints
    # on a fixture image and compare the first logits to recorded values.
    @cached_property
    def lowerCAmelCase__(self ):
        '''Image processor for the L1-300 checkpoint (None without vision deps).'''
        return (
            EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
            if is_vision_available()
            else None
        )

    @slow
    def lowerCAmelCase__(self ):
        '''Plain image-classification head: check logit shape and first 3 values.'''
        __a : str = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
        __a : Optional[Any] = self.default_image_processor
        __a : List[str] = prepare_img()
        __a : int = image_processor(images=_lowercase , return_tensors="""tf""" )
        # forward pass
        __a : Optional[Any] = model(**_lowercase , training=_lowercase )
        # verify the logits
        __a : str = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowercase )
        # reference values recorded from the original checkpoint
        __a : Dict = tf.constant([-0.0555, 0.4825, -0.0852] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )

    @slow
    def lowerCAmelCase__(self ):
        '''Distillation ("with teacher") head: same checks as above.'''
        __a : Any = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            """snap-research/efficientformer-l1-300""" )
        __a : Any = self.default_image_processor
        __a : str = prepare_img()
        __a : str = image_processor(images=_lowercase , return_tensors="""tf""" )
        # forward pass
        __a : List[Any] = model(**_lowercase , training=_lowercase )
        # verify the logits
        __a : int = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowercase )
        # reference values recorded from the original checkpoint
        __a : List[str] = tf.constant([-0.1312, 0.4353, -1.0499] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
| 63 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class A ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
    '''Fast pipeline tests for StableUnCLIPPipeline (tiny random components).

    NOTE(review): an automated rewrite collapsed every local to `lowercase__`,
    so names consumed later (`embedder_hidden_size`, `embedder_projection_dim`,
    `prior_tokenizer`, `prior`, `unet`, `vae`, the whole components dict, ...)
    are unbound as written — restore the original bindings before running.
    '''

    A__ = StableUnCLIPPipeline
    A__ = TEXT_TO_IMAGE_PARAMS
    A__ = TEXT_TO_IMAGE_BATCH_PARAMS
    A__ = TEXT_TO_IMAGE_IMAGE_PARAMS
    A__ = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    A__ = False

    def lowerCamelCase__ (self : Any ) -> str:
        '''Build tiny prior + denoiser components and return them as a dict.'''
        lowercase__ = 32
        lowercase__ = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        lowercase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        torch.manual_seed(0 )
        lowercase__ = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=_UpperCAmelCase , projection_dim=_UpperCAmelCase , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        lowercase__ = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_UpperCAmelCase , num_layers=1 , )
        torch.manual_seed(0 )
        lowercase__ = DDPMScheduler(
            variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=_UpperCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
        # regular denoising components
        torch.manual_seed(0 )
        lowercase__ = StableUnCLIPImageNormalizer(embedding_dim=_UpperCAmelCase )
        lowercase__ = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
        torch.manual_seed(0 )
        lowercase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        torch.manual_seed(0 )
        lowercase__ = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=_UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        lowercase__ = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_UpperCAmelCase , layers_per_block=1 , upcast_attention=_UpperCAmelCase , use_linear_projection=_UpperCAmelCase , )
        torch.manual_seed(0 )
        lowercase__ = DDIMScheduler(
            beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=_UpperCAmelCase , steps_offset=1 , )
        torch.manual_seed(0 )
        lowercase__ = AutoencoderKL()
        lowercase__ = {
            # prior components
            """prior_tokenizer""": prior_tokenizer,
            """prior_text_encoder""": prior_text_encoder,
            """prior""": prior,
            """prior_scheduler""": prior_scheduler,
            # image noising components
            """image_normalizer""": image_normalizer,
            """image_noising_scheduler""": image_noising_scheduler,
            # regular denoising components
            """tokenizer""": tokenizer,
            """text_encoder""": text_encoder,
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
        }
        return components

    def lowerCamelCase__ (self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str]=0 ) -> List[str]:
        '''Return deterministic pipeline kwargs (mps needs a global seed).'''
        if str(_UpperCAmelCase ).startswith("""mps""" ):
            lowercase__ = torch.manual_seed(_UpperCAmelCase )
        else:
            lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
        lowercase__ = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """prior_num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    def lowerCamelCase__ (self : List[str] ) -> Optional[Any]:
        '''Attention slicing must not change outputs (exact only on CPU).'''
        lowercase__ = torch_device == """cpu"""
        self._test_attention_slicing_forward_pass(test_max_difference=_UpperCAmelCase )

    def lowerCamelCase__ (self : List[Any] ) -> Optional[int]:
        '''Batched vs single inference must match (exact only on cpu/mps).'''
        lowercase__ = torch_device in ["""cpu""", """mps"""]
        self._test_inference_batch_single_identical(test_max_difference=_UpperCAmelCase )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
    '''Slow GPU integration tests for the published stable-unclip checkpoint.'''

    def lowerCamelCase__ (self : Dict ) -> Optional[int]:
        '''Free GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCamelCase__ (self : Union[str, Any] ) -> Dict:
        '''Compare a generated image against a recorded reference array.'''
        lowercase__ = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
        lowercase__ = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
        pipe.to(_UpperCAmelCase )
        pipe.set_progress_bar_config(disable=_UpperCAmelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowercase__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
        lowercase__ = pipe("""anime turle""" , generator=_UpperCAmelCase , output_type="""np""" )
        lowercase__ = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )

    def lowerCamelCase__ (self : Any ) -> Optional[Any]:
        '''With offload + slicing, peak GPU memory must stay under 7 GB.'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowercase__ = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
        lowercase__ = pipe.to(_UpperCAmelCase )
        pipe.set_progress_bar_config(disable=_UpperCAmelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowercase__ = pipe(
            """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
        lowercase__ = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 15 |
from math import log
from scipy.constants import Boltzmann, physical_constants
A : Any = 3_0_0 # TEMPERATURE (unit = K)
def UpperCamelCase ( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 1 |
def greatest_common_divisor(x: int, y: int) -> int:
    """Return gcd(x, y) via Euclid's algorithm.

    FIX(review): all three functions in this module had been renamed to the
    same identifier ``lowerCAmelCase`` with duplicate parameter names (a
    SyntaxError), and the recursive/helper calls referenced undefined names.
    Restored the intended names, which also makes the ``__main__`` guard's
    ``solution()`` call resolve.
    """
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Return the least common multiple of x and y."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Project Euler 5: smallest positive number divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 719 |
from __future__ import annotations
from math import pi, sqrt
def lowerCAmelCase ( inductance : float , capacitance : float )-> tuple:
    """Return ("Resonant frequency", f) for an LC circuit, f = 1/(2*pi*sqrt(L*C)).

    FIX(review): both parameters had been renamed to the same identifier
    ``snake_case__`` (a SyntaxError) while the body reads ``inductance`` and
    ``capacitance``; restored the names the body uses.

    :param inductance: inductance L in henry (must be > 0)
    :param capacitance: capacitance C in farad (must be > 0)
    :raises ValueError: if either value is zero or negative
    """
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative" )
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative" )
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 608 | 0 |
'''simple docstring'''
def lowerCamelCase ( index : int):
    """Return one of four hard-coded adjacency lists used as graph fixtures.

    FIX(review): the parameter had been renamed to ``lowerCamelCase`` by an
    automated rewrite, leaving the subscript ``[index]`` referencing an unbound
    name; restored the parameter name the body uses.

    :param index: which fixture graph to return (0-3)
    """
    return [
        {
            0: [1, 2],
            1: [0, 2],
            2: [0, 1, 3, 5],
            3: [2, 4],
            4: [3],
            5: [2, 6, 8],
            6: [5, 7],
            7: [6, 8],
            8: [5, 7],
        },
        {
            0: [6],
            1: [9],
            2: [4, 5],
            3: [4],
            4: [2, 3],
            5: [2],
            6: [0, 7],
            7: [6],
            8: [],
            9: [1],
        },
        {
            0: [4],
            1: [6],
            2: [],
            3: [5, 6, 7],
            4: [0, 6],
            5: [3, 8, 9],
            6: [1, 3, 4, 7],
            7: [3, 6, 8, 9],
            8: [5, 7],
            9: [5, 7],
        },
        {
            0: [1, 3],
            1: [0, 2, 4],
            2: [1, 3, 4],
            3: [0, 2, 4],
            4: [1, 2, 3],
        },
    ][index]
def lowerCamelCase ( graph : dict[int, list[int]]):
    """Return the bridges of an undirected graph as sorted (u, v) pairs.

    Uses the classic DFS low-link method: an edge (at, to) is a bridge when no
    back-edge from ``to``'s subtree reaches ``at`` or above, i.e. when
    ``id_ <= low[to]`` after exploring ``to``.

    FIX(review): an automated rewrite destroyed the parameter and local names
    (``lowercase__``, ``low``, ``visited``, ``bridges`` were all unbound),
    raising NameError; reconstructed the bindings the control flow reads.
    Vertices are assumed to be labelled 0..n-1.

    :param graph: adjacency-list mapping vertex -> list of neighbours
    """
    id_ = 0
    n = len(graph )  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_ )
                low[at] = min(low[at] , low[to] )
                if id_ <= low[to]:
                    # nothing in `to`'s subtree reaches back above `at`
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] , low[to] )

    bridges: list[tuple[int, int]] = []
    for i in range(n ):
        if not visited[i]:
            dfs(i , -1 , bridges , id_ )
    return bridges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 665 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase ( lowercase__ : str ):
    '''Build a YolosConfig for a named YOLOS variant plus COCO id/label maps.

    NOTE(review): an automated rewrite renamed the parameter away from
    ``yolos_name`` and collapsed every assignment target to ``__lowercase``,
    so the size checks below read an unbound name and none of the intended
    config attributes (hidden sizes, image size, num_labels, id2label, ...)
    are actually set — restore the original bindings before use.
    '''
    __lowercase =YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        __lowercase =1_92
        __lowercase =7_68
        __lowercase =12
        __lowercase =3
        __lowercase =[8_00, 13_33]
        __lowercase =False
    elif yolos_name == "yolos_s_dWr":
        __lowercase =3_30
        __lowercase =14
        __lowercase =6
        __lowercase =13_20
    elif "yolos_s" in yolos_name:
        __lowercase =3_84
        __lowercase =15_36
        __lowercase =12
        __lowercase =6
    elif "yolos_b" in yolos_name:
        __lowercase =[8_00, 13_44]
    # COCO detection label mapping from the hub dataset repo
    __lowercase =91
    __lowercase ='huggingface/label-files'
    __lowercase ='coco-detection-id2label.json'
    __lowercase =json.load(open(hf_hub_download(lowercase__, lowercase__, repo_type='dataset' ), 'r' ) )
    __lowercase ={int(lowercase__ ): v for k, v in idalabel.items()}
    __lowercase =idalabel
    __lowercase ={v: k for k, v in idalabel.items()}
    return config
def __UpperCamelCase ( lowercase__ : dict, lowercase__ : YolosConfig, lowercase__ : bool = False ):
    '''Split each timm fused qkv projection into separate q/k/v state-dict entries.

    NOTE(review): the assignment targets were collapsed to ``__lowercase`` by an
    automated rewrite, so the query/key/value slices computed below are never
    written back into the state dict under their HF keys — restore the
    original ``state_dict[...] = ...`` targets before use.
    '''
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        __lowercase =state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        __lowercase =state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        __lowercase =in_proj_weight[: config.hidden_size, :]
        __lowercase =in_proj_bias[: config.hidden_size]
        __lowercase =in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        __lowercase =in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        __lowercase =in_proj_weight[-config.hidden_size :, :]
        __lowercase =in_proj_bias[-config.hidden_size :]
def __UpperCamelCase ( lowercase__ : str ):
'''simple docstring'''
if "backbone" in name:
__lowercase =name.replace('backbone', 'vit' )
if "cls_token" in name:
__lowercase =name.replace('cls_token', 'embeddings.cls_token' )
if "det_token" in name:
__lowercase =name.replace('det_token', 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
__lowercase =name.replace('mid_pos_embed', 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
__lowercase =name.replace('pos_embed', 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
__lowercase =name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
__lowercase =name.replace('blocks', 'encoder.layer' )
if "attn.proj" in name:
__lowercase =name.replace('attn.proj', 'attention.output.dense' )
if "attn" in name:
__lowercase =name.replace('attn', 'attention.self' )
if "norm1" in name:
__lowercase =name.replace('norm1', 'layernorm_before' )
if "norm2" in name:
__lowercase =name.replace('norm2', 'layernorm_after' )
if "mlp.fc1" in name:
__lowercase =name.replace('mlp.fc1', 'intermediate.dense' )
if "mlp.fc2" in name:
__lowercase =name.replace('mlp.fc2', 'output.dense' )
if "class_embed" in name:
__lowercase =name.replace('class_embed', 'class_labels_classifier' )
if "bbox_embed" in name:
__lowercase =name.replace('bbox_embed', 'bbox_predictor' )
if "vit.norm" in name:
__lowercase =name.replace('vit.norm', 'vit.layernorm' )
return name
def convert_state_dict(orig_state_dict, model):
    """Rewrite an original YOLOS state dict in place to HF Transformers naming.

    Fused q/k/v ("qkv") tensors are split into separate query/key/value entries,
    sized by the layer's `all_head_size`; every other key is renamed via
    `rename_key`.

    NOTE(review): the original file mangled this function's two parameters to the
    same name (a SyntaxError) and dropped the dict-assignment targets; this body
    is restored from the canonical HF YOLOS conversion script. `rename_key` is
    the rename helper defined earlier in this file (also mangled there).

    Args:
        orig_state_dict: the raw checkpoint state dict (mutated and returned).
        model: a YolosForObjectDetection instance, used only to read
            `all_head_size` per encoder layer.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # Keys look like "backbone.blocks.<layer>.attn.qkv.{weight,bias}".
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            prefix = f"vit.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                # Query, key, value rows (in that order) of the fused matrix.
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict


# Backward-compatible alias for the obfuscated name this function carried.
__UpperCamelCase = convert_state_dict
def prepare_img():
    """Download the standard COCO val2017 'two cats' image used for sanity checks.

    Returns a PIL.Image opened from the streamed HTTP response.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # Bug fix: `stream` previously received the URL string (accidentally truthy)
    # instead of an explicit boolean.
    im = Image.open(requests.get(url, stream=True).raw)
    return im


# Backward-compatible alias for the obfuscated name this function carried.
__UpperCamelCase = prepare_img
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original YOLOS checkpoint to the HF Transformers format.

    Args:
        yolos_name: one of 'yolos_ti', 'yolos_s_200_pre', 'yolos_s_300_pre',
            'yolos_s_dWr', 'yolos_base'.
        checkpoint_path: path to the original ``.pth`` state dict.
        pytorch_dump_folder_path: output directory for the converted model.
        push_to_hub: also push model and image processor to the hub if True.

    Raises:
        ValueError: for an unknown `yolos_name`.

    NOTE(review): the original def repeated the parameter name `lowercase__`
    four times (a SyntaxError); names here are restored from the argparse call
    site. The helpers `get_yolos_config`, `convert_state_dict` and
    `prepare_img` were also name-mangled in this file and must be restored.
    """
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    # Per-variant expected 3x3 slices of logits and predicted boxes.
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Checkpoint-name -> hub-repo-name mapping.
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    # CLI entry point for the YOLOS conversion.
    # Bug fix: the parser and parsed args were previously bound to
    # `UpperCAmelCase` but read as `parser`/`args` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 119 | 0 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
# Issues carrying any of these labels are exempt from auto-stale handling.
# Bug fix: this list was bound to `__magic_name__` (with a wrong `int`
# annotation) while the code below reads `LABELS_TO_EXEMPT`.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]

# Backward-compatible alias for the obfuscated name.
__magic_name__ = LABELS_TO_EXEMPT
def snake_case_():
    """Close or stale-comment inactive huggingface/transformers issues.

    An open issue is closed when the bot was the last commenter, it has been
    inactive for more than 7 days and is at least 30 days old; otherwise, after
    23 days of inactivity it gets a stale comment. Issues with exempt labels
    are skipped. Requires a GITHUB_TOKEN environment variable.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Bug fix: the sort key lambda previously referenced an undefined
        # name, and `reverse`/`len` were passed undefined variables.
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
    # Bug fix: the script called the undefined `main()`; the entry point in
    # this file is `snake_case_` (the stale-issue routine above).
    snake_case_()
| 710 |
'''simple docstring'''
import cva
import numpy as np
class HarrisCorner:
    """Harris corner detector over a grayscale image read with OpenCV.

    NOTE(review): the original body collapsed every local variable into a
    single name (`_snake_case`), losing all intermediate results; this body is
    restored from the canonical implementation this file was derived from.
    """

    def __init__(self, k: float, window_size: int):
        """k: Harris free parameter, must be 0.04 or 0.06; window_size: odd window span."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Return (RGB image with corners marked red, list of [x, y, response]).

        Args:
            img_path: path to the image file; it is read as grayscale.
        """
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        # np.gradient returns (d/drow, d/dcol) = (dy, dx).
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        # NOTE(review): a hard-coded local k=0.04 shadows self.k here, as in
        # the source implementation — confirm whether self.k was intended.
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list

    # Backward-compatible alias for the obfuscated method name.
    UpperCamelCase = detect


# Backward-compatible alias for the obfuscated class name.
__SCREAMING_SNAKE_CASE = HarrisCorner
if __name__ == "__main__":
    # Demo: run the detector and save the annotated image.
    # Bug fix: the detector instance and the returned image were bound to
    # `__magic_name__` but read as `edge_detect`/`color_img` (NameError).
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
| 368 | 0 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
# Fixture locations for the AutoFeatureExtractor tests below.
# NOTE(review): all three assignments bind the same name `__A`, so only the
# last value survives at runtime; the upstream test module declares three
# distinct constants here (fixtures dir, dummy feature-extractor config,
# dummy config) — the renaming collapsed them. Confirm before relying on it.
__A = get_tests_dir("""fixtures""")
__A = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
__A = get_tests_dir("""fixtures/dummy-config.json""")
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests for `AutoFeatureExtractor`: hub/local loading, error messages,
    custom registration and `trust_remote_code` handling.

    NOTE(review): every test method below is named `snake_case`, so each
    definition shadows the previous one and unittest would only collect the
    last. Several bodies also pass the undefined name `__UpperCAmelCase`
    where a fixture path, class, or previously-assigned local (cf. the
    module-level `__A` constants) was presumably intended — confirm against
    the upstream transformers test module before relying on these tests.
    """

    def snake_case ( self ):
        """Placeholder/setup-style method; the local assignment has no observable effect."""
        lowerCAmelCase__ :Tuple = 0

    def snake_case ( self ):
        """Load a feature extractor directly from a hub model id."""
        lowerCAmelCase__ :Union[str, Any] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
        self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )

    def snake_case ( self ):
        """Load a feature extractor from a local path."""
        lowerCAmelCase__ :List[str] = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase )
        self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )

    def snake_case ( self ):
        """config.json alone (without feature_extractor_type) must suffice to load locally."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowerCAmelCase__ :Optional[Any] = WavaVecaConfig()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            lowerCAmelCase__ :Any = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase ).to_dict()

            config_dict.pop('feature_extractor_type' )
            lowerCAmelCase__ :Any = WavaVecaFeatureExtractor(**__UpperCAmelCase )

            # save in new folder
            model_config.save_pretrained(__UpperCAmelCase )
            config.save_pretrained(__UpperCAmelCase )

            lowerCAmelCase__ :Union[str, Any] = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase )

            # make sure private variable is not incorrectly saved
            lowerCAmelCase__ :Optional[Any] = json.loads(config.to_json_string() )
            self.assertTrue('_processor_class' not in dict_as_saved )

            self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )

    def snake_case ( self ):
        """Load from a standalone preprocessor config file."""
        lowerCAmelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase )
        self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )

    def snake_case ( self ):
        """A non-existent repo id raises with a helpful message."""
        with self.assertRaisesRegex(
            __UpperCAmelCase , 'bert-base is not a local folder and is not a valid model identifier' ):
            lowerCAmelCase__ :Tuple = AutoFeatureExtractor.from_pretrained('bert-base' )

    def snake_case ( self ):
        """An invalid git revision raises with a helpful message."""
        with self.assertRaisesRegex(
            __UpperCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            lowerCAmelCase__ :str = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase , revision='aaaaaa' )

    def snake_case ( self ):
        """A repo without preprocessor_config.json raises with a helpful message."""
        with self.assertRaisesRegex(
            __UpperCAmelCase , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
            lowerCAmelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )

    def snake_case ( self ):
        """Remote custom feature extractors require trust_remote_code=True and survive a save/reload."""
        with self.assertRaises(__UpperCAmelCase ):
            lowerCAmelCase__ :Tuple = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(__UpperCAmelCase ):
            lowerCAmelCase__ :Union[str, Any] = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=__UpperCAmelCase )

        lowerCAmelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(
            'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=__UpperCAmelCase )
        self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(__UpperCAmelCase )
            lowerCAmelCase__ :str = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase , trust_remote_code=__UpperCAmelCase )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )

    def snake_case ( self ):
        """Register a custom config/feature-extractor pair with the auto classes."""
        try:
            AutoConfig.register('custom' , __UpperCAmelCase )
            AutoFeatureExtractor.register(__UpperCAmelCase , __UpperCAmelCase )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(__UpperCAmelCase ):
                AutoFeatureExtractor.register(__UpperCAmelCase , __UpperCAmelCase )

            # Now that the config is registered, it can be used as any other config with the auto-API
            lowerCAmelCase__ :Any = CustomFeatureExtractor.from_pretrained(__UpperCAmelCase )

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(__UpperCAmelCase )
                lowerCAmelCase__ :Tuple = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase )
                self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )

        finally:
            # Always undo the registration so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def snake_case ( self ):
        """`trust_remote_code` chooses between a locally registered class and the hub one."""

        class _lowerCAmelCase ( a ):
            """Local stand-in feature extractor that marks itself as local."""

            # NOTE(review): the base class `a` is undefined here — upstream this
            # subclasses a concrete feature extractor. Confirm before use.
            __magic_name__ :int = True

        try:
            AutoConfig.register('custom' , __UpperCAmelCase )
            AutoFeatureExtractor.register(__UpperCAmelCase , __UpperCAmelCase )
            # If remote code is not set, the default is to use local
            lowerCAmelCase__ :List[str] = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )

            # If remote code is disabled, we load the local one.
            lowerCAmelCase__ :Dict = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=__UpperCAmelCase )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )

            # If remote is enabled, we load from the Hub
            lowerCAmelCase__ :Dict = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=__UpperCAmelCase )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(not hasattr(__UpperCAmelCase , 'is_local' ) )

        finally:
            # Always undo the registration so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 93 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Return ordered (HF name, original name) rename pairs for the patch
    embedding of CvT stage `idx`.

    Bug fix: the parameter and accumulator were mangled (`lowerCAmelCase_`
    bound but `idx`/`embed` read), which raised NameError.
    """
    embed = []
    hf_prefix = f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings"
    orig_prefix = f"stage{idx}.patch_embed"
    # Order matters: proj.weight, proj.bias, norm.weight, norm.bias.
    for hf_mod, orig_mod in (("projection", "proj"), ("normalization", "norm")):
        for param in ("weight", "bias"):
            embed.append((f"{hf_prefix}.{hf_mod}.{param}", f"{orig_prefix}.{orig_mod}.{param}"))
    return embed


# Backward-compatible alias for the obfuscated name this function carried.
__snake_case = embeddings
def attention(idx, cnt):
    """Return ordered (HF name, original name) rename pairs for attention block
    `cnt` of CvT stage `idx`.

    The list order is significant: the conversion loop matches these pairs
    positionally against the checkpoint. Bug fix: parameters and the
    accumulator were mangled (`lowerCAmelCase_` bound but `idx`/`cnt`/
    `attention_weights` read), which raised NameError; the repeated append
    blocks are also replaced by data-driven loops producing the same list.
    """
    attention_weights = []
    hf_prefix = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_prefix = f"stage{idx}.blocks.{cnt}"

    # Convolutional q/k/v projections: conv weight plus the five batch-norm
    # parameters/buffers each, in checkpoint order.
    for proj in ("query", "key", "value"):
        short = proj[0]
        attention_weights.append(
            (
                f"{hf_prefix}.attention.attention.convolution_projection_{proj}.convolution_projection.convolution.weight",
                f"{orig_prefix}.attn.conv_proj_{short}.conv.weight",
            )
        )
        for bn_param in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.convolution_projection_{proj}.convolution_projection.normalization.{bn_param}",
                    f"{orig_prefix}.attn.conv_proj_{short}.bn.{bn_param}",
                )
            )

    # Linear q/k/v projections.
    for proj in ("query", "key", "value"):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.projection_{proj}.{param}",
                    f"{orig_prefix}.attn.proj_{proj[0]}.{param}",
                )
            )

    # Attention output projection, MLP, and the two layer norms.
    for hf_mod, orig_mod in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append((f"{hf_prefix}.{hf_mod}.{param}", f"{orig_prefix}.{orig_mod}.{param}"))

    return attention_weights


# Backward-compatible alias for the obfuscated name this function carried.
__snake_case = attention
def cls_token(idx):
    """Return the (HF name, original name) rename pair for the CLS token of stage `idx`.

    NOTE(review): the original-checkpoint side is always "stage2.cls_token"
    regardless of `idx`, as in the source script (only stage 2 carries a CLS
    token). Bug fix: parameter/accumulator names were mangled (NameError).
    """
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token


# Backward-compatible alias for the obfuscated name this function carried.
__snake_case = cls_token
def __snake_case ( ) -> List[str]:
SCREAMING_SNAKE_CASE__ = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint into the HF Transformers format.

    Args:
        cvt_model: name of the CvT variant ("cvt-13" / "cvt-21" / wide "w24").
        image_size: input image size for the image processor.
        cvt_file_name: path to the original ``.pth`` state dict.
        pytorch_dump_folder_path: output directory.

    NOTE(review): the original def repeated `lowerCAmelCase_` four times (a
    SyntaxError), passed `idalabel`/`labelaid` kwargs to CvtConfig, and
    dropped assignment targets; restored from the canonical conversion
    script. The helpers `cls_token`/`embeddings`/`attention`/`final` were
    mangled to `__snake_case` in this file and must be restored alongside.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # NOTE(review): the mangled source only shows `... = image_size`; setting the
    # processor's shortest edge matches the upstream script — confirm.
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    # Positional copy: HF key <- original key for every rename pair.
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # CLI entry point for the CvT conversion.
    # Bug fix: the parser and parsed args were previously bound to `_A` but
    # read as `parser`/`args` (NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 100 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
# Backward-compatible alias for the obfuscated name (the demo code below reuses it).
snake_case = T


class SegmentTree(Generic[T]):
    """Iterative (bottom-up) segment tree combining values with a binary function.

    The tree is a flat list `st` of size 2*N: leaves live in st[N:2N] and
    internal node p combines its children at 2p and 2p+1.

    NOTE(review): the mangled original had duplicate `__A` parameters (a
    SyntaxError), three methods sharing one name, and lost `st[p]` writes;
    restored from the canonical implementation this file derives from.
    """

    def __init__(self, arr: list, fnc) -> None:
        """Build a tree over `arr`, combining with `fnc` (e.g. min, max, add)."""
        any_type = None  # placeholder for the N unused internal slots before build()
        self.N: int = len(arr)
        self.st: list = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        """Fill the internal nodes from the leaves upwards."""
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v) -> None:
        """Set arr[p] = v and recombine all ancestors of that leaf."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int):  # noqa: E741
        """Combine values over the inclusive index range [l, r]; None if empty."""
        l, r = l + self.N, r + self.N
        res = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


# Backward-compatible alias for the obfuscated class name.
A_ = SegmentTree
if __name__ == "__main__":
    # Self-test: verify every range query against functools.reduce over the
    # raw list, before and after a batch of point updates.
    # Bug fix: the mangled original bound every value to `snake_case` and
    # called an undefined `test_all_segments`; all bindings are restored and
    # the whole demo is kept under the __main__ guard.
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    # index -> new value, applied between the two verification passes
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Compare every inclusive [i, j] query with a reduce() over the list."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
snake_case = logging.get_logger(__name__)
class A_(DonutImageProcessor):
    """Deprecated feature-extractor shim: identical to DonutImageProcessor but
    emits a FutureWarning on construction.

    Bug fix: the base class was the undefined name `UpperCAmelCase`; the
    imported `DonutImageProcessor` (see the module imports) is the intended
    base, and the deprecation category is made explicit.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


# Canonical name referenced by the deprecation message.
DonutFeatureExtractor = A_
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
# Preprocessing pipeline: resize to 256x256, convert to tensor, scale to [-1, 1].
# Bug fix: this was bound to `_lowerCAmelCase` while the preprocessing function
# below reads `trans` (NameError).
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)

# Backward-compatible alias for the obfuscated name.
_lowerCAmelCase = trans
def a_(image):
    """Normalize input into a batched tensor for the pipeline.

    Tensors pass through unchanged; a single PIL image or a list of PIL
    images is converted to RGB, transformed via `trans`, and stacked.

    Bug fix: the mangled original discarded the rebindings of `image`, so
    PIL/list inputs were returned unprocessed.
    """
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


# The pipeline class below calls this helper as `preprocess`.
preprocess = a_
class lowerCAmelCase(DiffusionPipeline):
    """DDIM noise-comparative pipeline: noises an input image up to an
    intermediate timestep (set by `strength`) and denoises it back.

    NOTE(review): the mangled original subclassed the undefined name
    `__UpperCamelCase` (the imported DiffusionPipeline is the intended base),
    gave three methods the same name while `__call__` invokes
    `check_inputs`/`get_timesteps`/`prepare_latents`, and repeated parameter
    names in signatures (a SyntaxError); restored from the canonical
    community pipeline.
    """

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        """Validate that `strength` lies in [0, 1]."""
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        """Return the tail of the scheduler's timesteps dictated by `strength`."""
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        """Move the preprocessed image to `device`/`dtype` and noise it to `timestep`."""
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        return init_latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
| 246 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
# Sentinel marking "run failed / metric missing" in the report tables.
# NOTE(review): the rest of the file refers to this value as `nan`, but the
# mangled name `_lowerCAmelCase` no longer matches — confirm against the
# original trainer-benchmark script.
_lowerCAmelCase : Dict = float('nan')
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] , __snake_case : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = sys.stdout
lowerCamelCase = open(__snake_case , 'a' )
def __getattr__( self : int , __snake_case : str ) -> Tuple:
'''simple docstring'''
return getattr(self.stdout , __snake_case )
def lowerCamelCase__ ( self : Dict , __snake_case : List[Any] ) -> int:
'''simple docstring'''
self.stdout.write(__snake_case )
# strip tqdm codes
self.file.write(re.sub(R'^.*\r' , '' , __snake_case , 0 , re.M ) )
def a_(max_width=80, full_python_path=False):
    """Reconstruct the command line this script was launched with.

    Fixes mangling damage: both parameters were named `UpperCamelCase_`
    (a duplicate-argument SyntaxError) and assignment targets were lost.

    Args:
        max_width: wrap the reconstructed command at this many columns using
            shell line continuations.
        full_python_path: if True emit the full `sys.executable` path,
            otherwise just its basename.

    Returns:
        The (possibly multi-line) command string.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def a_(args, output_dir):
    """Normalize `args.base_cmd` and split it into an argv list.

    Fixes mangling damage: both parameters were named `UpperCamelCase_`
    (a SyntaxError) and the re.sub results were assigned to a lost target
    instead of back onto `args.base_cmd`.

    Args:
        args: parsed argparse namespace carrying `base_cmd`.
        output_dir: directory injected as the benchmarked script's
            `--output_dir`.

    Returns:
        `[sys.executable, *split base command]` ready for `subprocess.run`.
    """
    # unwrap multi-line input into a single line
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def a_(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    """Run one benchmark variation as a subprocess and harvest its metrics.

    Fixes mangling damage: all seven parameters were named `UpperCamelCase_`
    (a SyntaxError) and assignment targets were lost.

    Args:
        id: 1-based index of this variation (used by the caller's labels).
        cmd: full argv list to execute.
        variation: variation string, used to name the per-run log files.
        output_dir: directory receiving logs and `all_results.json`.
        target_metric_key: metric that decides success (NaN on failure).
        metric_keys: set of metric keys to keep from the results file.
        verbose: echo the subprocess streams and failures.

    Returns:
        Dict of the selected metrics, or `{target_metric_key: nan}` on failure.
    """
    # flip to `if 1:` to debug this function without the slow training run
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def a_(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    """Run one variation `repeat_times` times and report averaged metrics.

    Fixes mangling damage: all ten parameters were named `UpperCamelCase_`
    (a SyntaxError), assignment targets were lost, and the results tuple
    rounded the wrong name (`round(UpperCamelCase_, 2) for x in results`
    instead of `round(x, 2)`).

    Returns:
        Dict of averaged metrics keyed by metric name plus `variation_key`,
        or `{variation_key: variation, target_metric_key: nan}` when every
        repeat failed.
    """
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    # erase the tqdm progress line before printing the final outcome
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def a_():
    """Return a formatted report of software versions and the visible GPU.

    Fixes mangling damage: the device-properties result was assigned to a
    lost target while the f-string below reads `properties` (NameError).
    Requires CUDA to be available.
    """
    properties = torch.cuda.get_device_properties(torch.device('cuda'))
    return f'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def a_(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    """Turn per-variation metric dicts into a table and print two reports
    (github-markdown and console formats).

    Fixes mangling damage: all five parameters were named `UpperCamelCase_`
    (a SyntaxError) and every DataFrame/assignment target was lost.

    Args:
        results: list of metric dicts as returned by `process_run`.
        target_metric_key: metric used for the diff-% column.
        report_metric_keys: extra metric columns to show.
        base_variation: variation string used as the 100% baseline, or None
            to fall back to the minimal target value.
        output_dir: kept for interface compatibility; unused here.
    """
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def a_():
    """Benchmark driver: parse args, run every variation, print the report.

    Fixes mangling damage: every assignment target was lost, all argparse
    defaults/types were replaced by the undefined `UpperCamelCase_`, and the
    longest-variation computation measured the wrong name
    (`max(len(UpperCamelCase_) for x in variations)` instead of `len(x)`).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base-cmd', default=None, type=str, required=True, help='Base cmd', )
    parser.add_argument(
        '--variations', default=None, type=str, nargs='+', required=True, help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'', )
    parser.add_argument(
        '--base-variation', default=None, type=str, help='Baseline variation to compare to. if None the minimal target value will be used to compare against', )
    parser.add_argument(
        '--target-metric-key', default=None, type=str, required=True, help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second', )
    parser.add_argument(
        '--report-metric-keys', default='', type=str, help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples', )
    parser.add_argument(
        '--repeat-times', default=1, type=int, help='How many times to re-run each variation - an average will be reported', )
    parser.add_argument(
        '--output_dir', default='output_benchmark', type=str, help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked', )
    parser.add_argument(
        '--verbose', default=False, action='store_true', help='Whether to show the outputs of each run or just the benchmark progress', )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r'\|', x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(' '.join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}.txt'
    print(f'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt')
    print(f'and this script\'s output is also piped into {report_fn}')

    sys.stdout = Tee(report_fn)

    print(f'\n*** Running {len(variations)} benchmarks:')
    print(f'Base command: {" ".join(base_cmd)}')

    variation_key = 'variation'
    results = []
    for id, variation in enumerate(tqdm(variations, desc='Total completion: ', leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1, cmd, variation_key, variation, longest_variation_len, args.target_metric_key, report_metric_keys, args.repeat_times, output_dir, args.verbose, ) )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
# Script entry point.
# NOTE(review): `main` is not defined under that name in this mangled file —
# the driver function above was renamed to `a_` — so this call raises
# NameError as written; confirm against the original trainer-benchmark script.
if __name__ == "__main__":
    main()
| 246 | 1 |
'''simple docstring'''
def lowercase__(_UpperCamelCase=600851475143) -> int:
    """Return the largest prime factor of a number (Project Euler problem 3).

    Fixes mangling damage: the body read `_lowercase` while the parameter is
    `_UpperCamelCase` (NameError on every call) and all assignment targets
    had been collapsed to `UpperCamelCase`.

    Args:
        _UpperCamelCase: the number to factor; anything castable to int.

    Returns:
        The largest prime factor as an int (1 for input 1).

    Raises:
        TypeError: if the input cannot be cast to int.
        ValueError: if the input is not greater than or equal to one.
    """
    try:
        n = int(_UpperCamelCase)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    ans = 1
    i = 2
    # trial division: strip each factor completely; the last one kept is the
    # largest factor not exceeding sqrt of the remaining value
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    # whatever remains above sqrt is itself prime and is the largest factor
    if n > 1:
        ans = n
    return int(ans)
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this file (the function above
    # was renamed to `lowercase__`), so this demo call raises NameError as
    # written; confirm against the original Project Euler script.
    print(F'{solution() = }')
| 718 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__magic_name__ : Any = logging.get_logger(__name__)
__magic_name__ : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__magic_name__ : str = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
__magic_name__ : int = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
__magic_name__ : Union[str, Any] = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
__magic_name__ : Tuple = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
__magic_name__ : List[str] = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
__magic_name__ : str = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
__magic_name__ : Any = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
__magic_name__ : Optional[Any] = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
__magic_name__ : Optional[Any] = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class A__ ( __snake_case ):
    """DPR context-encoder tokenizer (upstream: `DPRContextEncoderTokenizer`).

    NOTE(review): mangling damage — the base class was renamed to
    `__snake_case` (undefined; upstream subclasses `BertTokenizer`) and all
    four class attributes are assigned to the same name `snake_case__`, so
    only the last assignment survives. The right-hand-side module constants
    (VOCAB_FILES_NAMES, CONTEXT_ENCODER_*) are also undefined here because
    the module-level dicts above were renamed to `__magic_name__`.
    """

    # vocab file names / pretrained URL map / max lengths / init kwargs
    snake_case__ = VOCAB_FILES_NAMES
    snake_case__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    snake_case__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class A__ ( __snake_case ):
    """DPR question-encoder tokenizer (upstream: `DPRQuestionEncoderTokenizer`).

    NOTE(review): same mangling damage as the context-encoder class above —
    undefined `__snake_case` base class, four attributes all assigned to
    `snake_case__` (only the last survives), and undefined right-hand-side
    constants.
    """

    # vocab file names / pretrained URL map / max lengths / init kwargs
    snake_case__ = VOCAB_FILES_NAMES
    snake_case__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    snake_case__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
# One scored answer span produced by the reader decoding below.
# NOTE(review): both namedtuples are bound to the same mangled name
# `__magic_name__`, so the second assignment clobbers the first; the methods
# below refer to them by their original names `DPRSpanPrediction` /
# `DPRReaderOutput`, which are undefined in this file as written.
__magic_name__ : Tuple = collections.namedtuple(
    '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
# Raw reader model outputs consumed by the span-decoding method below.
__magic_name__ : Optional[Any] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
__magic_name__ : str = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(__snake_case )
class A__ :
    """Mixin adding DPR-reader-specific encoding/decoding on top of a BERT
    tokenizer (upstream: `CustomDPRReaderTokenizerMixin`).

    NOTE(review): heavy mangling damage throughout this class — in each
    method every parameter is named `_SCREAMING_SNAKE_CASE` (duplicate
    arguments are a SyntaxError), assignment targets were collapsed to
    `UpperCamelCase`, and bodies read unbound names (`titles`, `texts`,
    `questions`, `reader_input`, `start_logits`, ...). The two helper
    methods below also share the name `_SCREAMING_SNAKE_CASE` (the second
    shadows the first) while the first calls `self._get_best_spans`, which
    does not exist under that name here. Confirm all intended names against
    upstream `transformers/models/dpr/tokenization_dpr.py`.
    """

    def __call__( self : Optional[int] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : Union[bool, str] = False , _SCREAMING_SNAKE_CASE : Union[bool, str] = False , _SCREAMING_SNAKE_CASE : Optional[int] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , _SCREAMING_SNAKE_CASE : Optional[bool] = None , **_SCREAMING_SNAKE_CASE : Any , ):
        """Encode (question, title, text) triples as `[CLS] q [SEP] title [SEP] text`.

        Intended parameters (per upstream): questions, titles, texts,
        padding, truncation, max_length, return_tensors,
        return_attention_mask, **kwargs.
        """
        # no passages given: behave like the plain tokenizer
        if titles is None and texts is None:
            return super().__call__(
                _SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
        # only one of titles/texts given: encode question + that one as a pair
        elif titles is None or texts is None:
            UpperCamelCase = titles if texts is None else texts
            return super().__call__(
                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
        # normalize single strings to lists; one question may be duplicated
        # over all passages
        UpperCamelCase = titles if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else [titles]
        UpperCamelCase = texts if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else [texts]
        UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
        UpperCamelCase = questions if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else [questions] * n_passages
        if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
            raise ValueError(
                f'There should be as many titles than texts but got {len(_SCREAMING_SNAKE_CASE )} titles and {len(_SCREAMING_SNAKE_CASE )} texts.' )
        # question+title encoded with special tokens, passage text without;
        # the two id lists are concatenated per passage below
        UpperCamelCase = super().__call__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )['input_ids']
        UpperCamelCase = super().__call__(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )['input_ids']
        UpperCamelCase = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            ]
        }
        # build an attention mask that ignores padding, unless explicitly
        # disabled by the caller
        if return_attention_mask is not False:
            UpperCamelCase = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            UpperCamelCase = attention_mask
        return self.pad(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )

    def _SCREAMING_SNAKE_CASE ( self : List[str] , _SCREAMING_SNAKE_CASE : BatchEncoding , _SCREAMING_SNAKE_CASE : DPRReaderOutput , _SCREAMING_SNAKE_CASE : int = 16 , _SCREAMING_SNAKE_CASE : int = 64 , _SCREAMING_SNAKE_CASE : int = 4 , ):
        """Select the best answer spans from reader logits (upstream:
        `decode_best_spans`), walking passages best-relevance first.
        """
        UpperCamelCase = reader_input['input_ids']
        UpperCamelCase , UpperCamelCase , UpperCamelCase = reader_output[:3]
        UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
        # rank passages by their relevance logit, best first
        UpperCamelCase = sorted(range(_SCREAMING_SNAKE_CASE ) , reverse=_SCREAMING_SNAKE_CASE , key=relevance_logits.__getitem__ )
        UpperCamelCase = []
        for doc_id in sorted_docs:
            UpperCamelCase = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            UpperCamelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            # trim trailing padding, if any
            if sequence_ids[-1] == self.pad_token_id:
                UpperCamelCase = sequence_ids.index(self.pad_token_id )
            else:
                UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
            UpperCamelCase = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_SCREAMING_SNAKE_CASE , top_spans=_SCREAMING_SNAKE_CASE , )
            for start_index, end_index in best_spans:
                # shift passage-relative indices back to full-sequence indices
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_SCREAMING_SNAKE_CASE , start_index=_SCREAMING_SNAKE_CASE , end_index=_SCREAMING_SNAKE_CASE , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(_SCREAMING_SNAKE_CASE ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _SCREAMING_SNAKE_CASE ( self : List[Any] , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , ):
        """Enumerate candidate spans, score them as start+end logit sums, and
        keep the top non-overlapping ones (upstream: `_get_best_spans`).
        """
        UpperCamelCase = []
        # score every (start, end) pair within the allowed answer length
        for start_index, start_score in enumerate(_SCREAMING_SNAKE_CASE ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        UpperCamelCase = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x[1] , reverse=_SCREAMING_SNAKE_CASE )
        UpperCamelCase = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f'Wrong span indices: [{start_index}:{end_index}]' )
            UpperCamelCase = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f'Span is too long: {length} > {max_answer_length}' )
            # skip spans overlapping an already-chosen (higher scored) span
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(_SCREAMING_SNAKE_CASE ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(__snake_case )
class A__ ( __snake_case , __snake_case ):
    """DPR reader tokenizer (upstream: `DPRReaderTokenizer`), combining the
    span-decoding mixin above with a BERT tokenizer.

    NOTE(review): mangling damage — both base classes were renamed to
    `__snake_case` (undefined; upstream these are the mixin above and
    `BertTokenizer`), the first four attributes are all assigned to
    `snake_case__` (only the last survives), and the right-hand-side READER_*
    constants are undefined because the module dicts were renamed.
    """

    # vocab file names / pretrained URL map / max lengths / init kwargs
    snake_case__ = VOCAB_FILES_NAMES
    snake_case__ = READER_PRETRAINED_VOCAB_FILES_MAP
    snake_case__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case__ = READER_PRETRAINED_INIT_CONFIGURATION
    # model input names expected by the reader forward pass
    snake_case__ = ["""input_ids""", """attention_mask"""]
| 410 | 0 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
# Module logger — the class below calls `logger.info(...)`/`logger.error(...)`,
# so this must be named `logger` (the original bound it to `__lowerCAmelCase`
# and then immediately shadowed it with the dict below).
logger = logging.get_logger(__name__)

# Map ONNX tensor type strings to numpy dtypes. The original used
# nonexistent attributes (np.inta, np.uintaa, np.floataa, ...), which would
# raise AttributeError at import time.
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class a_:
    """Wrapper around an ONNX Runtime InferenceSession with save/load helpers.

    NOTE(review): in the original, all five methods were named
    `_SCREAMING_SNAKE_CASE`, so later definitions shadowed earlier ones, and
    internal call sites referenced `self._save_pretrained`,
    `OnnxRuntimeModel.load_model` and `cls._from_pretrained` — names that did
    not exist. Method names are restored from those call sites.
    """

    def __init__(self, model=None, **kwargs):
        logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""")
        self.model = model
        self.model_save_dir = kwargs.get("""model_save_dir""", None)
        # Default to the standard weights filename; _save_pretrained copies
        # `model_save_dir / latest_model_name`, so this must be a file name.
        self.latest_model_name = kwargs.get("""latest_model_name""", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        """Run inference; keyword args are converted to numpy inputs."""
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        # First argument of InferenceSession.run is output_names; None means
        # "all outputs" (the original erroneously passed the inputs dict).
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        """Create an ort.InferenceSession from a model path."""
        if provider is None:
            logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""")
            provider = """CPUExecutionProvider"""
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        """Copy the latest model file (and external weights, if any) to save_directory."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        """Public save entry point: validates the target and delegates to _save_pretrained."""
        if os.path.isfile(save_directory):
            logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(cls, model_id, use_auth_token=None, revision=None, force_download=False, cache_dir=None, file_name=None, provider=None, sess_options=None, **kwargs):
        """Load from a local directory or download a single model file from the hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = cls.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options)
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download, )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = cls.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id, force_download=True, use_auth_token=None, cache_dir=None, **model_kwargs):
        """Public load entry point; supports `repo@revision` syntax in model_id."""
        revision = None
        if len(str(model_id).split("""@""")) == 2:
            model_id, revision = model_id.split("""@""")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs, )
| 644 | """simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check Project Euler 43's substring-divisibility property of a digit tuple.

    `num` is a permutation of digits; with 1-based digits d1..d10, the
    3-digit substrings d2d3d4, d3d4d5, ... must be divisible by
    2, 3, 5, 7, 11, 13, 17 respectively.
    """
    # d2d3d4 divisible by 2  <=>  d4 is even
    if num[3] % 2 != 0:
        return False
    # d3d4d5 divisible by 3  <=>  its digit sum is divisible by 3
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d4d5d6 divisible by 5  <=>  d6 is 0 or 5
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the substring-divisibility property.

    NOTE(review): the original named both functions `_UpperCAmelCase` while the
    body called `is_substring_divisible` and the main guard called `solution`;
    names restored to match those call sites. `map(str, num)` also replaces the
    original's broken `map(<int param>, ...)`.
    """
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(F"{solution() = }")
| 644 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401

# Emit a deprecation warning at import time, pointing users at the new
# canonical import path for FlaxStableDiffusionControlNetPipeline.
deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
) | 234 | A = 8.3_14_45_98
# Universal gas constant R in J/(mol*K). The function below referenced
# UNIVERSAL_GAS_CONSTANT, but the original file only defined an unused `A`.
UNIVERSAL_GAS_CONSTANT = 8.3144598


def lowerCamelCase(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed sqrt(3RT/M) of gas molecules in m/s.

    Args:
        temperature: absolute temperature in kelvin (must be >= 0).
        molar_mass: molar mass in kg/mol (must be > 0).

    Raises:
        Exception: on invalid input (kept as a generic Exception so any
        existing callers catching it keep working).
    """
    # NOTE(review): the original declared both parameters as `UpperCamelCase`
    # (a SyntaxError); names restored from the error messages.
    if temperature < 0:
        raise Exception('Temperature cannot be less than 0 K')
    if molar_mass <= 0:
        raise Exception('Molar mass cannot be less than or equal to 0 kg/mol')
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example: nitrogen gas at 300 K (molar-mass value kept from the original file)
    temperature = 300
    molar_mass = 28
    vrms = lowerCamelCase(temperature, molar_mass)
    print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
"""simple docstring"""
import unittest
from transformers import DonutProcessor
lowerCamelCase_ = '''naver-clova-ix/donut-base'''
class UpperCamelCase_(unittest.TestCase):
    """Tests DonutProcessor's token2json parsing of tag-style sequences."""

    def setUp(self):
        # Must be named `setUp` so unittest runs it before each test and
        # `self.processor` exists below (the original obfuscated name was
        # never invoked). `lowerCamelCase_` is the module-level checkpoint id.
        self.processor = DonutProcessor.from_pretrained(lowerCamelCase_)

    def test_token2json(self):
        # Must be named `test_*` for unittest discovery to run it.
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        # DonutProcessor's parser is `token2json` (the original called a
        # nonexistent `tokenajson`) and the comparison must use the parsed
        # result, not the same name twice.
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 95 |
def lowercase_(a: int, b: int) -> str:
    """Return the bitwise XOR of two non-negative ints as a binary string.

    The result is zero-padded to the width of the wider operand and prefixed
    with "0b", e.g. lowercase_(37, 50) -> "0b010111".

    Raises:
        ValueError: if either input is negative.
    """
    # NOTE(review): the original declared both parameters as `A` (a
    # SyntaxError) and therefore XOR-ed a value with itself; restored to two
    # distinct operands.
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 478 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
# NOTE(review): the original bound all three metric docstrings to the single
# name `lowercase`, so only the last survived, while the metric class below
# references _CITATION / _DESCRIPTION / _KWARGS_DESCRIPTION. Names restored to
# match those references; string content reconstructed from the garbled source.
_CITATION = """\
@inproceedings{popovic-2015-chrf,
    title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
    author = "Popovi{\'c}, Maja",
    booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
    month = sep,
    year = "2015",
    address = "Lisbon, Portugal",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W15-3049",
    doi = "10.18653/v1/W15-3049",
    pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
    title = "chr{F}++: words helping character n-grams",
    author = "Popovi{\'c}, Maja",
    booktitle = "Proceedings of the Second Conference on Machine Translation",
    month = sep,
    year = "2017",
    address = "Copenhagen, Denmark",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W17-4770",
    doi = "10.18653/v1/W17-4770",
    pages = "612--618",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""

_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.

Args:
    predictions (list of str): The predicted sentences.
    references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
    char_order (int): Character n-gram order. Defaults to `6`.
    word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
    beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
    lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
    whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
    eps_smoothing (bool): If `True`, applies epsilon smoothing similar
    to reference chrF++.py, NLTK and Moses implementations. If `False`,
    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.

Returns:
    'score' (float): The chrF (chrF++) score,
    'char_order' (int): The character n-gram order,
    'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
    'beta' (int): Determine the importance of recall w.r.t precision

Examples:
    Example 1--a simple example of calculating chrF:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction, references=reference)
        >>> print(results)
        {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}

    Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
        >>> results = chrf.compute(predictions=prediction,
        ...                        references=reference,
        ...                        word_order=2)
        >>> print(results)
        {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}

    Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
        >>> results = chrf.compute(predictions=prediction,
        ...                        references=reference,
        ...                        word_order=2,
        ...                        lowercase=True)
        >>> print(results)
        {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCamelCase_(datasets.Metric):
    """chrF / chrF++ metric backed by sacrebleu's CHRF implementation.

    NOTE(review): the original named both methods `_UpperCamelCase` (the second
    shadowed the first) and declared every `_compute` parameter as `a` (a
    SyntaxError). `datasets.Metric` dispatches to `_info` and `_compute`, so
    the method names are restored accordingly; `_compute` parameter names are
    restored from the CHRF constructor defaults they carry.
    """

    def _info(self):
        # sacrebleu >= 1.4.12 is required for the CHRF class used below.
        if version.parse(scb.__version__) < version.parse('1.4.12'):
            raise ImportWarning(
                'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
                'You can install it with `pip install "sacrebleu>=1.4.12"`.')
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Sequence(datasets.Value('string', id='sequence'), id='references'),
                }),
            codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'],
            reference_urls=[
                'https://github.com/m-popovic/chrF',
            ],
        )

    def _compute(self, predictions, references, char_order=CHRF.CHAR_ORDER, word_order=CHRF.WORD_ORDER, beta=CHRF.BETA, lowercase=False, whitespace=False, eps_smoothing=False):
        # sacrebleu expects references transposed: one list per reference slot.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('Sacrebleu requires the same number of references for each prediction')
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 607 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase_(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for KandinskyImgaImgPipeline with tiny dummy components.

    NOTE(review): the original inherited from an undefined `snake_case_`
    (restored to the imported PipelineTesterMixin), bound all class attributes
    to the single name `lowerCAmelCase`, and named every member
    `_UpperCamelCase`; names are restored from the internal references
    (self.time_input_dim, self.block_out_channels_a, self.dummy_movq_kwargs,
    self.get_dummy_components, self.get_dummy_inputs, self.pipeline_class, ...).
    """

    pipeline_class = KandinskyImgaImgPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
    batch_params = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention_forwardGenerator_pass = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            'num_train_timesteps': 1000,
            'beta_schedule': 'linear',
            'beta_start': 0.00085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        # np.uinta in the original is not a numpy attribute; uint8 is required
        # for PIL Image.fromarray.
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((256, 256))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test for KandinskyImgaImgPipeline.

    NOTE(review): the original reused the fast-test class's name (shadowing it
    so those tests never ran) and gave the cleanup hook an obfuscated name even
    though it calls `super().tearDown()`; both restored. `torch.floataa` is not
    a torch dtype — float16 restored.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_img2img_frog.npy')
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
        prompt = 'A red cartoon frog, 4k'
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='', ).to_tuple()
        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type='np', )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 607 | 1 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single text line, padded to max_length on `padding_side`.

    NOTE(review): the original declared four parameters all named
    `UpperCamelCase__` (a SyntaxError) and tested `isinstance(x, x)`; names
    restored from the call sites in `__getitem__` below, and the isinstance
    check restored against the imported BartTokenizer (which needs
    add_prefix_space for lines not starting with a space).
    """
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(' ') else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding='max_length' if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )
def trim_batch(input_ids, pad_token_id, attention_mask=None, ):
    """Remove columns of `input_ids` that contain only `pad_token_id`.

    Returns the trimmed input_ids, or a (input_ids, attention_mask) pair when
    an attention mask is supplied (trimmed with the same column mask).
    """
    # NOTE(review): original parameters were all named `UpperCamelCase__`
    # (a SyntaxError); names restored from the collate_fn call sites below.
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class lowerCamelCase__(Dataset):
    """Line-by-line seq2seq dataset reading `<type_path>.source` / `.target` files.

    NOTE(review): the original inherited from an undefined `UpperCAmelCase_`
    (restored to the imported torch Dataset), named both of its last methods
    `__a` (shadowing), and called undefined `self.get_char_lens`; method names
    restored from those call sites. `isinstance(self.tokenizer, <int index>)`
    is restored to checks against the imported TaTokenizer / RagTokenizer.
    """

    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + '.source')
        self.tgt_file = Path(data_dir).joinpath(type_path + '.target')
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f'found empty line in {self.src_file}'
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip('\n')
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip('\n')
        assert source_line, f'empty source line for index {index}'
        assert tgt_line, f'empty tgt line for index {index}'
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, 'right')
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, 'right')
        source_ids = source_inputs['input_ids'].squeeze()
        target_ids = target_inputs['input_ids'].squeeze()
        src_mask = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        """Character length of every line in data_file."""
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        """Stack a list of examples and trim shared padding columns."""
        input_ids = torch.stack([x['input_ids'] for x in batch])
        masks = torch.stack([x['attention_mask'] for x in batch])
        target_ids = torch.stack([x['decoder_input_ids'] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch
# Module logger — set_extra_model_params below calls `logger.info(...)`, so
# this must be bound to the name `logger` (the original bound it to an
# obfuscated name, leaving `logger` undefined).
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Save git commit/branch/host info to `folder_path/git_log.json`.

    NOTE(review): renamed from `__snake_case`; delegates to get_git_info /
    save_json, the names the original body already referenced.
    """
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, 'git_log.json'))
def save_json(content, path, indent=4, **json_dump_kwargs):
    """Write `content` as pretty-printed JSON to `path`.

    NOTE(review): renamed from `__snake_case` to match the call in
    save_git_info above; parameters restored from how the body uses them.
    """
    with open(path, 'w') as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    """Read and return the JSON content of `path` (inverse of save_json)."""
    with open(path) as f:
        return json.load(f)
def get_git_info():
    """Return repo id, commit sha, branch and hostname for reproducibility logs.

    NOTE(review): renamed from `__snake_case` to match the call in
    save_git_info; the original passed/str'ed an undefined name where the
    repo object itself was meant.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
        'hostname': str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x)) — eager map."""
    return list(map(f, x))
def pickle_save(obj, path):
    """Pickle `obj` to the file at `path`."""
    with open(path, 'wb') as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace.

    NOTE(review): these four QA-metric functions were all named `__snake_case`
    in the original, shadowing each other while calling `normalize_answer` and
    `exact_match_score` by these names; names restored accordingly.
    """

    def remove_articles(text):
        return re.sub(r'\b(a|an|the)\b', ' ', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    """Token-level F1 between normalized prediction and ground truth (0 if no overlap)."""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def exact_match_score(prediction, ground_truth):
    """True iff prediction and ground truth are identical after normalization."""
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns, reference_lns):
    """Average exact-match over paired output/reference lines, as {"em": float}."""
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    """True iff the model prefix identifies a RAG model."""
    return model_prefix.startswith('rag')
def set_extra_model_params(extra_params, hparams, config):
    """Move `extra_params` from hparams onto config, mapping equivalent names.

    For each truthy hparam in extra_params: if the config has neither the
    param nor its equivalent (e.g. `dropout` -> `dropout_rate` for T5), log
    and drop it; otherwise copy the value onto the config under whichever
    name exists. The consumed attribute is removed from hparams either way.

    Returns the (mutated) hparams and config.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info('config doesn\'t have a `{}` attribute'.format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 690 |
"""simple docstring"""
import os
import sys
# Make the local `src/` tree importable. The append on the next line reads
# `SRC_DIR`, so the variable must carry that name (the original bound the
# path to an obfuscated name, leaving SRC_DIR undefined).
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
# Packages required to use the entry points below; `dependencies` is the
# conventional module-level name torch.hub reads from a hubconf file.
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]
# NOTE(review): all seven wrappers below were named `__snake_case` in the
# original, so only the last definition survived at module level. They are
# thin `from_pretrained` entry points over the Auto* classes; restored to
# distinct names mirroring the Auto class each one wraps.
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """Instantiate a configuration via AutoConfig.from_pretrained."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """Instantiate a tokenizer via AutoTokenizer.from_pretrained."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """Instantiate a base model via AutoModel.from_pretrained."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """Instantiate a causal-LM model via AutoModelForCausalLM.from_pretrained."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Instantiate a masked-LM model via AutoModelForMaskedLM.from_pretrained."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """Instantiate a sequence classifier via AutoModelForSequenceClassification.from_pretrained."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """Instantiate a QA model via AutoModelForQuestionAnswering.from_pretrained."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 690 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
# Number of bit planes used to encode each image channel.
# NOTE(review): the helpers below read this constant as ``BITS``; the
# assignment target here was mangled to ``A``, so ``BITS`` is undefined in
# this file — confirm against the upstream bit-diffusion example.
A: int = 8


def _snake_case(UpperCamelCase: "torch.Tensor", UpperCamelCase: int = BITS):
    """Encode an image tensor with values in [0, 1] as ±1-valued bit planes.

    Expands each of the ``c`` channels into ``bits`` channels holding the
    binary digits (MSB first) of the 0..255 pixel value, mapped to {-1, +1}.

    NOTE(review): this block is corrupted by mechanical renaming — both
    parameters share the name ``UpperCamelCase`` (a SyntaxError), every
    local is rebound to ``UpperCAmelCase`` while the right-hand sides still
    read the pre-rename names (``x``, ``bits``, ``mask``), and ``BITS`` is
    undefined here.  Reconstruct names before use.
    """
    UpperCAmelCase: Tuple = x.device
    # Scale [0, 1] floats to integer pixel values 0..255.
    UpperCAmelCase: List[str] = (x * 255).int().clamp(0, 255)
    # Bit-plane weights 2^(bits-1) .. 2^0.
    UpperCAmelCase: Optional[int] = 2 ** torch.arange(bits - 1, -1, -1, device=UpperCamelCase)
    UpperCAmelCase: Any = rearrange(UpperCamelCase, "d -> d 1 1")
    UpperCAmelCase: Union[str, Any] = rearrange(UpperCamelCase, "b c h w -> b c 1 h w")
    # Extract each bit: non-zero masked value -> 1.0, otherwise 0.0.
    UpperCAmelCase: Any = ((x & mask) != 0).float()
    UpperCAmelCase: str = rearrange(UpperCamelCase, "b c d h w -> b (c d) h w")
    # Map {0, 1} bits to {-1, +1}.
    UpperCAmelCase: List[Any] = bits * 2 - 1
    return bits
def _snake_case(UpperCamelCase: "torch.Tensor", bits: int = 8):
    """Convert a ±-signed bit-planes tensor back to a decimal image in [0, 1].

    Inverse of the decimal->bits helper above: each group of ``bits``
    channels is read as the binary digits (MSB first) of one original
    channel's 0..255 pixel value.

    Fixes in this revision:
    - both parameters were named ``UpperCamelCase`` (a SyntaxError); the
      second is renamed ``bits`` (positional calling convention unchanged);
    - the original used the non-existent dtype ``torch.intaa`` (mangled
      ``torch.int32``);
    - the default referenced the undefined name ``BITS`` (the module
      constant was mangled to ``A = 8``); the literal 8 is used instead;
    - the einops ``rearrange``/``reduce`` calls (one of which hardcoded
      ``d=8`` instead of ``bits``) are replaced by equivalent pure-torch
      reshapes parameterised on ``bits``.

    Args:
        UpperCamelCase: tensor of shape (b, c * bits, h, w); positive
            entries are treated as 1-bits, non-positive as 0-bits.
        bits: number of bit planes per channel (default 8).

    Returns:
        Float tensor of shape (b, c, h, w), values clamped to [0.0, 1.0].
    """
    x = (UpperCamelCase > 0).int()
    # Bit-plane weights 2^(bits-1) .. 2^0, broadcast over (d, h, w).
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=x.device, dtype=torch.int32)
    mask = mask.reshape(-1, 1, 1)
    b, cd, h, w = x.shape
    # b (c d) h w -> b c d h w, then weighted sum over the bit axis.
    x = x.reshape(b, cd // bits, bits, h, w)
    dec = (x * mask).sum(dim=2)
    return (dec / 255).clamp(0.0, 1.0)
def _snake_case(
    self: Dict,
    UpperCamelCase: torch.FloatTensor,
    UpperCamelCase: int,
    UpperCamelCase: torch.FloatTensor,
    UpperCamelCase: float = 0.0,
    UpperCamelCase: bool = True,
    UpperCamelCase: Dict = None,
    UpperCamelCase: bool = True,
):
    """DDIM step adapted to bit diffusion: same flow as ``DDIMScheduler.step``
    except "predicted x_0" is clamped to ``±self.bit_scale`` instead of ±1.

    NOTE(review): mechanically renamed and non-runnable as-is — every
    parameter shares the name ``UpperCamelCase`` (a SyntaxError) and the
    body reads the pre-rename names (``timestep``, ``model_output``,
    ``sample``, ``eta``, ``use_clipped_model_output``, ``generator``,
    ``return_dict``).  The numbered comments below are from upstream.
    """
    if self.num_inference_steps is None:
        raise ValueError(
            """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""")
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    UpperCAmelCase: Optional[Any] = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    UpperCAmelCase: Any = self.alphas_cumprod[timestep]
    UpperCAmelCase: List[str] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    UpperCAmelCase: str = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    UpperCAmelCase: Optional[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    # Bit-diffusion twist: the clamp bound is ±bit_scale rather than ±1.
    UpperCAmelCase: Optional[int] = self.bit_scale
    if self.config.clip_sample:
        UpperCAmelCase: str = torch.clamp(UpperCamelCase, -scale, UpperCamelCase)
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    UpperCAmelCase: List[str] = self._get_variance(UpperCamelCase, UpperCamelCase)
    UpperCAmelCase: str = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        UpperCAmelCase: List[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    UpperCAmelCase: Optional[int] = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    UpperCAmelCase: str = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        UpperCAmelCase: Union[str, Any] = model_output.device if torch.is_tensor(UpperCamelCase) else """cpu"""
        UpperCAmelCase: Optional[int] = torch.randn(model_output.shape, dtype=model_output.dtype, generator=UpperCamelCase).to(UpperCamelCase)
        UpperCAmelCase: Union[str, Any] = self._get_variance(UpperCamelCase, UpperCamelCase) ** 0.5 * eta * noise
        UpperCAmelCase: Optional[int] = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=UpperCamelCase, pred_original_sample=UpperCamelCase)
def _snake_case(
    self: List[str],
    UpperCamelCase: torch.FloatTensor,
    UpperCamelCase: int,
    UpperCamelCase: torch.FloatTensor,
    UpperCamelCase: List[Any] = "epsilon",
    UpperCamelCase: Optional[Any] = None,
    UpperCamelCase: bool = True,
):
    """DDPM step adapted to bit diffusion: same flow as ``DDPMScheduler.step``
    except "predicted x_0" is clamped to ``±self.bit_scale`` instead of ±1.

    NOTE(review): mechanically renamed and non-runnable as-is — every
    parameter shares the name ``UpperCamelCase`` (a SyntaxError) and the
    body reads the pre-rename names (``model_output``, ``sample``,
    ``prediction_type``, ``generator``, ``return_dict``, ``t``).
    """
    UpperCAmelCase: int = timestep
    # Split off the learned variance channels when the model predicts them.
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        UpperCAmelCase, UpperCAmelCase: Union[str, Any] = torch.split(UpperCamelCase, sample.shape[1], dim=1)
    else:
        UpperCAmelCase: Optional[int] = None
    # 1. compute alphas, betas
    UpperCAmelCase: Dict = self.alphas_cumprod[t]
    UpperCAmelCase: List[str] = self.alphas_cumprod[t - 1] if t > 0 else self.one
    UpperCAmelCase: List[str] = 1 - alpha_prod_t
    UpperCAmelCase: Optional[Any] = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        UpperCAmelCase: Optional[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        UpperCAmelCase: List[str] = model_output
    else:
        raise ValueError(F"Unsupported prediction_type {prediction_type}.")
    # 3. Clip "predicted x_0"
    # Bit-diffusion twist: the clamp bound is ±bit_scale rather than ±1.
    UpperCAmelCase: str = self.bit_scale
    if self.config.clip_sample:
        UpperCAmelCase: Tuple = torch.clamp(UpperCamelCase, -scale, UpperCamelCase)
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    UpperCAmelCase: str = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    UpperCAmelCase: Optional[Any] = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    UpperCAmelCase: Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    UpperCAmelCase: Any = 0
    if t > 0:
        UpperCAmelCase: str = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=UpperCamelCase).to(model_output.device)
        UpperCAmelCase: List[str] = (self._get_variance(UpperCamelCase, predicted_variance=UpperCamelCase) ** 0.5) * noise
    UpperCAmelCase: List[str] = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=UpperCamelCase, pred_original_sample=UpperCamelCase)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
    """Bit-diffusion image pipeline: samples in ±bit_scale bit space with a
    UNet, then decodes the final bit planes back to a [0, 1] image.

    NOTE(review): the base class name was mangled to ``UpperCAmelCase__``
    (upstream this is a diffusers ``DiffusionPipeline``), and the names the
    body dispatches on — ``ddim_bit_scheduler_step`` /
    ``ddpm_bit_scheduler_step`` and ``decimal_to_bits`` /
    ``bits_to_decimal`` — are not defined in this file (the corresponding
    helpers above were all renamed ``_snake_case``).
    """

    def __init__(self, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE=1.0,) -> Any:
        """NOTE(review): all three parameters share one name (a SyntaxError);
        upstream they are (unet, scheduler, bit_scale)."""
        super().__init__()
        UpperCAmelCase: Optional[int] = bit_scale
        # Choose the DDIM or DDPM bit-space step depending on scheduler type.
        UpperCAmelCase: Optional[Any] = (
            ddim_bit_scheduler_step if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=_SCREAMING_SNAKE_CASE, scheduler=_SCREAMING_SNAKE_CASE)

    @torch.no_grad()
    def __call__(self, _SCREAMING_SNAKE_CASE=256, _SCREAMING_SNAKE_CASE=256, _SCREAMING_SNAKE_CASE=50, _SCREAMING_SNAKE_CASE=None, _SCREAMING_SNAKE_CASE=1, _SCREAMING_SNAKE_CASE="pil", _SCREAMING_SNAKE_CASE=True, **_SCREAMING_SNAKE_CASE,) -> Union[Tuple, ImagePipelineOutput]:
        """Run the denoising loop and return the decoded image(s).

        NOTE(review): same mangling as ``__init__`` — every parameter shares
        one name; the body reads the pre-rename names (``batch_size``,
        ``height``, ``width``, ``latents``, ``output_type``, ``image``,
        ``return_dict``).
        """
        # Sample initial latents in image space, then map to ±bit_scale bits.
        UpperCAmelCase: Union[str, Any] = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width), generator=_SCREAMING_SNAKE_CASE,)
        UpperCAmelCase: Tuple = decimal_to_bits(_SCREAMING_SNAKE_CASE) * self.bit_scale
        UpperCAmelCase: str = latents.to(self.device)
        self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE)
        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            UpperCAmelCase: List[Any] = self.unet(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE).sample
            # compute the previous noisy sample x_t -> x_t-1
            UpperCAmelCase: Optional[int] = self.scheduler.step(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE).prev_sample
        # Decode bit planes back to a [0, 1] image.
        UpperCAmelCase: Optional[Any] = bits_to_decimal(_SCREAMING_SNAKE_CASE)
        if output_type == "pil":
            UpperCAmelCase: Optional[int] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE)
| 359 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
A: Union[str, Any] = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
A: int = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
A: Tuple = 0.4
# NOTE(review): the three assignments above all target ``A``, so each one
# clobbers the previous; the functions below read these values as
# ``N_POPULATION``, ``N_SELECTED`` and ``MUTATION_PROBABILITY``, which are
# therefore undefined — the constant names were lost in mangling.
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def _snake_case(item: str, main_target: str) -> tuple[str, float]:
    """Score *item* against *main_target*: one point per position where the
    characters match.

    Fix in this revision: both parameters were named ``UpperCamelCase``,
    which is a SyntaxError in Python; they are renamed (the positional
    calling convention is unchanged).

    Args:
        item: candidate string; must be no longer than ``main_target``.
        main_target: the string being evolved toward.

    Returns:
        Tuple of the candidate and its score as a float.
    """
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def _snake_case(parent_a: str, parent_b: str) -> tuple[str, str]:
    """Single-point crossover: swap the tails of two parents at a random cut.

    Fixes in this revision:
    - both parameters were named ``UpperCamelCase`` (a SyntaxError);
    - the original collapsed both parents into one name, so each "child"
      was ``parent_a[:k] + parent_a[k:]`` — i.e. just a copy of the first
      parent; the proper prefix/suffix swap is restored.

    Assumes both parents have the same length (TODO confirm: the GA in
    this file evolves fixed-length strings).

    Returns:
        Tuple of the two children.
    """
    cut = random.randint(0, len(parent_a) - 1)
    child_a = parent_a[:cut] + parent_b[cut:]
    child_b = parent_b[:cut] + parent_a[cut:]
    return (child_a, child_b)
def _snake_case(child: str, genes: list[str]) -> str:
    """With probability ``MUTATION_PROBABILITY``, replace one random gene of
    *child* with a random gene drawn from *genes*; otherwise return the
    string unchanged.

    Fixes in this revision:
    - both parameters were named ``UpperCamelCase`` (a SyntaxError);
    - the original drew ``random.choice(genes)`` but never wrote it back
      into the child, so mutation was a no-op; the replacement at a random
      position is restored.

    NOTE(review): ``MUTATION_PROBABILITY`` must exist at module level; in
    this file the constant assignment was mangled to ``A = 0.4`` — confirm
    the constant name against the upstream file.
    """
    child_chars = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        # Overwrite one random position with a random gene.
        child_chars[random.randint(0, len(child_chars) - 1)] = random.choice(genes)
    return "".join(child_chars)
def _snake_case(UpperCamelCase: tuple[str, float], UpperCamelCase: list[tuple[str, float]], UpperCamelCase: list[str],):
    """Breed children from a parent against random members of the scored
    population, in proportion to the parent's fitness (capped at 10 pairs).

    NOTE(review): mechanically renamed and non-runnable as-is — all three
    parameters share one name (a SyntaxError), the body reads the
    pre-rename names (``parent_a``, ``population_score``, ``genes``,
    ``child_n``, ``pop``), the annotated tuple-unpack below is itself a
    SyntaxError, and ``crossover``/``mutate`` are not defined under those
    names in this file (the helpers above were all renamed ``_snake_case``).
    """
    UpperCAmelCase: Optional[Any] = []
    # Generate more children proportionally to the fitness score.
    UpperCAmelCase: Optional[Any] = int(parent_a[1] * 100) + 1
    UpperCAmelCase: List[str] = 10 if child_n >= 10 else child_n
    for _ in range(UpperCamelCase):
        UpperCAmelCase: List[str] = population_score[random.randint(0, UpperCamelCase)][0]
        UpperCAmelCase, UpperCAmelCase: Any = crossover(parent_a[0], UpperCamelCase)
        # Append new string to the population list.
        pop.append(mutate(UpperCamelCase, UpperCamelCase))
        pop.append(mutate(UpperCamelCase, UpperCamelCase))
    return pop
def _snake_case(UpperCamelCase: str, UpperCamelCase: list[str], UpperCamelCase: bool = True):
    """Evolve random strings toward a target with a simple genetic algorithm
    and return ``(generation, total_population, best_string)``.

    NOTE(review): mechanically renamed and non-runnable as-is — all three
    parameters share one name (a SyntaxError), the body reads the
    pre-rename names (``target``, ``genes``, ``debug``, ``population``,
    ``generation``, ``total_population``, ``population_score``), and the
    module constants it checks (``N_POPULATION``, ``N_SELECTED``) were
    renamed to ``A`` above.  Also note the sort-key lambda below: its
    parameter was renamed but its body still reads ``x``, so the key closes
    over an undefined name.
    """
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        UpperCAmelCase: Dict = F"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(UpperCamelCase)
    # Verify that the target contains no genes besides the ones inside genes variable.
    UpperCAmelCase: str = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        UpperCAmelCase: Optional[Any] = F"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(UpperCamelCase)
    # Generate random starting population.
    UpperCAmelCase: Optional[int] = []
    for _ in range(UpperCamelCase):
        population.append("""""".join([random.choice(UpperCamelCase) for i in range(len(UpperCamelCase))]))
    # Just some logs to know what the algorithms is doing.
    UpperCAmelCase, UpperCAmelCase: Any = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(UpperCamelCase)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        UpperCAmelCase: str = [evaluate(UpperCamelCase, UpperCamelCase) for item in population]
        # Check if there is a matching evolution.
        UpperCAmelCase: Union[str, Any] = sorted(UpperCamelCase, key=lambda UpperCamelCase: x[1], reverse=UpperCamelCase)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                F"\nGeneration: {generation}"
                F"\nTotal Population:{total_population}"
                F"\nBest score: {population_score[0][1]}"
                F"\nBest string: {population_score[0][0]}")
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        UpperCAmelCase: Tuple = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(UpperCamelCase)
        # Normalize population score to be between 0 and 1.
        UpperCAmelCase: List[str] = [
            (item, score / len(UpperCamelCase)) for item, score in population_score
        ]
        # This is selection
        for i in range(UpperCamelCase):
            population.extend(select(population_score[int(UpperCamelCase)], UpperCamelCase, UpperCamelCase))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(UpperCamelCase) > N_POPULATION:
                break
if __name__ == "__main__":
    # Demo target string and the alphabet of genes the GA may use.
    A: Union[str, Any] = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    A: Dict = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    # NOTE(review): the line below is a SyntaxError (annotated multi-target
    # assignment), and ``basic``/``target_str``/``genes_list`` plus the
    # f-string names ``generation``/``population``/``target`` are all
    # undefined here after mangling.  Upstream this was
    # ``generation, population, target = basic(target_str, genes_list)``.
    A , A , A: List[Any] = basic(target_str, genes_list)
    print(
        f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
    )
| 359 | 1 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _a ( snake_case_ , unittest.TestCase ):
    """Fast unit tests for ``DDIMPipeline`` via the shared pipeline harness.

    NOTE(review): mangled names make this class non-functional as written —
    the first base ``snake_case_`` is undefined here (upstream:
    ``PipelineTesterMixin``), every class attribute is assigned to the same
    name ``_lowerCamelCase`` so only the final ``False`` survives (upstream
    these were distinct fields such as ``pipeline_class`` and ``params``),
    all six methods share the name ``__A`` so only the last survives, and
    method locals are all rebound to ``A_`` while right-hand sides still
    read the original names (``unet``, ``scheduler``, ``generator`` …).
    """

    _lowerCamelCase : int = DDIMPipeline
    _lowerCamelCase : Union[str, Any] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    _lowerCamelCase : Any = PipelineTesterMixin.required_optional_params - {
        'num_images_per_prompt',
        'latents',
        'callback',
        'callback_steps',
    }
    _lowerCamelCase : List[Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    _lowerCamelCase : Optional[int] = False

    def __A(self: Any):
        """Build a tiny seeded UNet + DDIM scheduler component dict."""
        torch.manual_seed(0)
        A_ = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),)
        A_ = DDIMScheduler()
        A_ = {"unet": unet, "scheduler": scheduler}
        return components

    def __A(self: Optional[Any], UpperCAmelCase: Optional[int], UpperCAmelCase: Optional[int] = 0):
        """Deterministic call kwargs for the pipeline under test."""
        if str(UpperCAmelCase).startswith("mps"):
            A_ = torch.manual_seed(UpperCAmelCase)
        else:
            A_ = torch.Generator(device=UpperCAmelCase).manual_seed(UpperCAmelCase)
        A_ = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def __A(self: str):
        """Smoke-test a 2-step CPU inference against a pinned output slice."""
        A_ = "cpu"
        A_ = self.get_dummy_components()
        A_ = self.pipeline_class(**UpperCAmelCase)
        pipe.to(UpperCAmelCase)
        pipe.set_progress_bar_config(disable=UpperCAmelCase)
        A_ = self.get_dummy_inputs(UpperCAmelCase)
        A_ = pipe(**UpperCAmelCase).images
        A_ = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        A_ = np.array(
            [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04])
        A_ = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(UpperCAmelCase, 1E-3)

    def __A(self: Any):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)

    def __A(self: Optional[int]):
        super().test_save_load_local(expected_max_difference=3E-3)

    def __A(self: Optional[int]):
        super().test_save_load_optional_components(expected_max_difference=3E-3)

    def __A(self: Any):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
    """Slow GPU integration tests for DDIM/DDPM pipelines against pinned
    output slices from pretrained checkpoints.

    NOTE(review): both test methods share the mangled name ``__A`` — only
    the second (the ema-bedroom test) survives on the class — and method
    locals are all rebound to ``A_`` while right-hand sides still read the
    original names (``unet``, ``scheduler``, ``ddim``, ``ddpm`` …).
    """

    def __A(self: Any):
        """DDIM on google/ddpm-cifar10-32: pinned 32x32 output slice."""
        A_ = "google/ddpm-cifar10-32"
        A_ = UNetaDModel.from_pretrained(UpperCAmelCase)
        A_ = DDIMScheduler()
        A_ = DDIMPipeline(unet=UpperCAmelCase, scheduler=UpperCAmelCase)
        ddim.to(UpperCAmelCase)
        ddim.set_progress_bar_config(disable=UpperCAmelCase)
        A_ = torch.manual_seed(0)
        A_ = ddim(generator=UpperCAmelCase, eta=0.0, output_type="numpy").images
        A_ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        A_ = np.array([0.1_723, 0.1_617, 0.1_600, 0.1_626, 0.1_497, 0.1_513, 0.1_505, 0.1_442, 0.1_453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def __A(self: Optional[Any]):
        """DDIM on google/ddpm-ema-bedroom-256: pinned 256x256 output slice."""
        A_ = "google/ddpm-ema-bedroom-256"
        A_ = UNetaDModel.from_pretrained(UpperCAmelCase)
        A_ = DDIMScheduler.from_pretrained(UpperCAmelCase)
        A_ = DDIMPipeline(unet=UpperCAmelCase, scheduler=UpperCAmelCase)
        ddpm.to(UpperCAmelCase)
        ddpm.set_progress_bar_config(disable=UpperCAmelCase)
        A_ = torch.manual_seed(0)
        A_ = ddpm(generator=UpperCAmelCase, output_type="numpy").images
        A_ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        A_ = np.array([0.0_060, 0.0_201, 0.0_344, 0.0_024, 0.0_018, 0.0_002, 0.0_022, 0.0_000, 0.0_069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class _lowercase ( UpperCAmelCase__ ):
    """Text-summarization tool wrapping ``philschmid/bart-large-cnn-samsum``.

    NOTE(review): the base class name was mangled to ``UpperCAmelCase__``
    (undefined here; upstream this is the transformers ``PipelineTool``),
    every class attribute is assigned to the same name
    ``_SCREAMING_SNAKE_CASE`` so only the last list survives (upstream these
    were ``default_checkpoint``, ``description``, ``name``,
    ``pre_processor_class``, ``model_class``, ``inputs``, ``outputs``), and
    the three methods all share the name ``a`` (upstream: encode / forward /
    decode) so only the last definition survives.  Also suspicious:
    ``truncation=SCREAMING_SNAKE_CASE__`` passes the raw text where a
    boolean is expected — confirm against upstream.
    """

    _SCREAMING_SNAKE_CASE : Dict = """philschmid/bart-large-cnn-samsum"""
    _SCREAMING_SNAKE_CASE : Optional[Any] = (
        """This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
        """and returns a summary of the text."""
    )
    _SCREAMING_SNAKE_CASE : Tuple = """summarizer"""
    _SCREAMING_SNAKE_CASE : Any = AutoTokenizer
    _SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForSeqaSeqLM
    _SCREAMING_SNAKE_CASE : int = ["""text"""]
    _SCREAMING_SNAKE_CASE : List[Any] = ["""text"""]

    def a(self: int, SCREAMING_SNAKE_CASE__: Tuple) -> Any:
        """Tokenize the input text into model-ready tensors (encode step)."""
        return self.pre_processor(SCREAMING_SNAKE_CASE__, return_tensors="""pt""", truncation=SCREAMING_SNAKE_CASE__)

    def a(self: List[str], SCREAMING_SNAKE_CASE__: Any) -> Optional[int]:
        """Run seq2seq generation on the tokenized inputs (forward step)."""
        return self.model.generate(**SCREAMING_SNAKE_CASE__)[0]

    def a(self: List[str], SCREAMING_SNAKE_CASE__: List[str]) -> Tuple:
        """Decode generated token ids back to a summary string (decode step)."""
        return self.pre_processor.decode(SCREAMING_SNAKE_CASE__, skip_special_tokens=SCREAMING_SNAKE_CASE__, clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__)
| 427 | 0 |
def lowerCAmelCase_(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string
    with a ``"0b"`` prefix.

    Fixes in this revision:
    - both parameters were named ``A_``, which is a SyntaxError in Python;
      they are renamed (the positional calling convention is unchanged);
    - dropped the redundant ``str(...)`` around ``bin(...)`` (``bin``
      already returns ``str``).

    Args:
        a: first operand, must be >= 0.
        b: second operand, must be >= 0.

    Returns:
        ``"0b"`` followed by the binary digits of ``a | b`` (no leading
        zeros beyond the longer operand's width).

    Raises:
        ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = bin(a)[2:]  # strip the "0b" prefix
    b_binary = bin(b)[2:]
    max_len = max(len(a_binary), len(b_binary))
    # A result bit is 1 when either operand has a 1 in that position.
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ... (Project Euler #12)."""
    for n in range(1, 1_00_00_00):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Return the number of divisors of ``n`` via prime factorisation.

    Uses the standard identity: if n = p1^e1 * ... * pk^ek then the divisor
    count is (e1 + 1) * ... * (ek + 1).
    """
    i = 2
    divisors_count = 1
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        # Remaining n is a prime factor with multiplicity 1.
        divisors_count *= 2
    return divisors_count


def lowerCAmelCase_() -> int:
    """Return the first triangle number with more than 500 divisors.

    Fix in this revision: the original defined all three functions under the
    single name ``lowerCAmelCase_``, so each ``def`` clobbered the previous
    one and this entry point called the undefined names
    ``triangle_number_generator``/``count_divisors`` and referenced an
    undefined ``A_``; the helpers now carry the names the entry point uses,
    and the generator variable is passed through correctly.
    """
    return next(i for i in triangle_number_generator() if count_divisors(i) > 5_00)


if __name__ == "__main__":
    print(lowerCAmelCase_())
| 221 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.