import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
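# --- Illustrative addition (not part of the original test file) ---
# A minimal, hedged sketch of driving the model outside unittest, reusing the tiny
# dimensions from AlbertModelTester's defaults. It assumes torch is installed.
if __name__ == "__main__":
    tiny_config = AlbertConfig(
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
    )
    tiny_model = AlbertModel(tiny_config).eval()
    demo_input_ids = torch.randint(0, tiny_config.vocab_size, (2, 7))
    with torch.no_grad():
        demo_output = tiny_model(demo_input_ids)
    print(demo_output.last_hidden_state.shape)  # expected: torch.Size([2, 7, 36])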
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype
@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    buf_reader = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(buf_reader)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
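# --- Illustrative addition (not part of the original test file) ---
# A hedged, minimal sketch of the ArrowWriter API these tests exercise: write two
# examples to an in-memory Arrow stream and read them back.
def _arrow_writer_demo():
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()
    return num_examples, num_bytes, table.to_pydict()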
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    # BigBird question answering with an extra CLS head for predicting the answer
    # category, so weights stay loadable with FlaxBigBirdForQuestionAnswering.
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        # one-hot encode the labels, then take the negative log-likelihood
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
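# --- Illustrative addition (not part of the original script) ---
# A hedged sanity check for the loss above: with uniform logits over 5 classes the
# per-example cross entropy is ln(5) ~ 1.609, and averaging three identical terms
# keeps that value.
def _loss_sanity_check():
    logits = jnp.zeros((2, 5))
    labels = jnp.array([1, 3])
    return calculate_loss_for_nq(logits, labels, logits, labels, logits, labels)  # ~1.6094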
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
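# --- Illustrative addition (not part of the original script) ---
# A hedged sketch of wiring the optimizer factory above: the schedule warms up
# linearly from init_lr to lr over `warmup_steps`, then decays towards 1e-7.
def _build_tx_demo():
    tx, lr_schedule = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1000, weight_decay=0.0095)
    return lr_schedule(0), lr_schedule(100)  # 0.0 at step 0; peak lr at the end of warmup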
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """
    Returns prime factors of n as a list.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(97)
    [97]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
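# Illustrative addition (not part of the original file): a per-timestep coefficient of
# shape (4,) is padded on the right to (4, 1, 1, 1) and then broadcast against a batch
# of samples of shape (4, 3, 8, 8) -- i.e. broadcasting happens "from the left".
def _broadcast_demo():
    coeff = jnp.arange(4.0)
    return broadcast_to_shape_from_left(coeff, (4, 3, 8, 8)).shape  # (4, 3, 8, 8)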
def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
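# Illustrative addition (not part of the original file): the cosine schedule yields one
# beta per diffusion timestep, each capped at max_beta.
def _betas_demo():
    betas = betas_for_alpha_bar(10)
    assert betas.shape == (10,)
    assert bool((betas <= 0.999).all())
    return betas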
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
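# Illustrative addition (not part of the original file): because this module uses
# relative imports, the config is normally constructed through the installed
# transformers package, e.g.:
#
#   from transformers import MarkupLMConfig
#
#   config = MarkupLMConfig(max_depth=64)  # override one MarkupLM-specific field
#   assert config.model_type == "markuplm"
#   assert config.max_depth == 64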
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []

    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0

            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """
    Gives the euclidean distance between two points.

    >>> euclidean_distance([0, 0], [3, 4])
    5.0
    >>> euclidean_distance([1, 2, 3], [1, 8, 11])
    10.0
    """
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(train_data, train_target, classes, point, k=5):
    """
    Classifies `point` with the k-nearest-neighbours algorithm.
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
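# Illustrative addition (not part of the original file): a hedged sketch of checking
# the classifier against the held-out split created above.
#
#     predicted = classifier(X_train, y_train, classes, X_test[0])
#     actual = classes[y_test[0]]
#     print(predicted, actual)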
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
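# --- Illustrative addition (not part of the original file) ---
# A hedged usage sketch: the subclass can be loaded from any CLIP tokenizer
# checkpoint (the model id below is an assumption) and a multi-vector placeholder
# registered before encoding prompts.
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer.encode("a photo of <cat-toy>", vector_shuffle=True)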
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
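
# For reference, the `get_duration` decorator imported from the local `utils`
# module above can be thought of as a timing wrapper along these lines -- a
# hedged sketch, not the actual implementation:
#
#     import functools, time
#
#     def get_duration(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             starttime = time.time()
#             func(*args, **kwargs)
#             return time.time() - starttime
#         return wrapper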
| 173 | 0 |
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Convert a 3D point to its 2D perspective projection."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a 3D point around the x, y or z axis by the given angle."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(F'{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }')
print(F'{rotate(1.0, 2.0, 3.0, "y", 90.0) = }')
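    # Hedged extra example: rotate a point about the z-axis, then project it
    # to 2D, to show how the two helpers compose.
    x, y, z = rotate(1.0, 2.0, 3.0, "z", 45.0)
    print(convert_to_2d(x, y, z, 10.0, 10.0))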
| 238 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
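

if __name__ == "__main__":
    # Hedged standalone sketch (not part of the test suite): one denoising step
    # with the scheduler under test, using random tensors in place of a model.
    scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    noise_pred = torch.randn(1, 3, 8, 8)
    sample = scheduler.step(noise_pred, scheduler.timesteps[0], sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])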
| 238 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4_096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
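

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): instantiate the
    # config and inspect the block-sparse attention fields defined above.
    config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
    print(config.model_type, config.block_size, config.num_random_blocks)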
| 118 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        feature_extractor = self.default_feature_extractor
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 118 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timesformer'] = [
        'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimesformerModel',
        'TimesformerForVideoClassification',
        'TimesformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
a__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 355 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce TensorFlow log noise before importing tf below
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 243 | 0 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/xprophetnet-large-wiki100-cased""": (
        """https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 3_0_5_2_2,
        hidden_size: Optional[int] = 1_0_2_4,
        encoder_ffn_dim: Optional[int] = 4_0_9_6,
        num_encoder_layers: Optional[int] = 1_2,
        num_encoder_attention_heads: Optional[int] = 1_6,
        decoder_ffn_dim: Optional[int] = 4_0_9_6,
        num_decoder_layers: Optional[int] = 1_2,
        num_decoder_attention_heads: Optional[int] = 1_6,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 5_1_2,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 3_2,
        relative_max_distance: Optional[int] = 1_2_8,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`." )
| 95 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the remote filesystem prefix (e.g. `s3://`) from `dataset_path`."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Whether `fs` is a remote filesystem (its protocol differs from the local `file` protocol)."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's event-loop and thread references so they can be safely re-created."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
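

if __name__ == "__main__":
    # Hedged sketch (not part of the original module): how the helpers above
    # behave for a remote URI vs. the local filesystem.
    print(extract_path_from_uri("s3://my-bucket/datasets/train"))  # my-bucket/datasets/train
    print(is_remote_filesystem(fsspec.filesystem("file")))  # False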
| 296 | 0 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Solve an ODE y' = ode_func(x, y) with the forward (explicit) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
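    # Hedged extra check: integrating y' = y from x = 0 to 1 with y(0) = 1
    # should approach e ~= 2.718 as the step size shrinks.
    ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(ys[-1])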
| 358 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    # scan over sliding windows of ks
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
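

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): derive a partition
    # spec for a toy parameter tree shaped like the rules above expect.
    toy_params = {"transformer": {"wte": {"embedding": None}, "ln_f": {"bias": None, "scale": None}}}
    print(set_partitions(toy_params))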
| 230 | 0 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(F"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 6_5536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(F"length of state dict: {len(state_dict.keys() )}")
    print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), F"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(F"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 6_5536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(F"length of state dict: {len(state_dict.keys() )}")
    print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 311 |
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)


def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
):
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(F"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(F"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas


def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.", default="sshleifer/distilbart-xsum-12-3", )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch")
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all.")
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return")
    parser.add_argument(
        "--sync_timeout", type=int, default=600, required=False, help="How long should master process wait for other processes to finish.", )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(F"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(F"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(F"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(F"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(F"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)


def combine_partial_results(partial_results) -> list:
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds


def gather_results_from_each_node(num_replicas, save_dir, timeout) -> list:
    # wait until every replica has written its rank_*.json shard, then load them
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
        # Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 311 | 1 |
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)
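
# A few illustrative checks (note the rule above caps each octet at 254):
#     is_ip_v4_address_valid("192.168.0.23")   -> True
#     is_ip_v4_address_valid("192.168.256.1")  -> False
#     is_ip_v4_address_valid("not.an.ip.addr") -> False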
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
| 121 |
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8), "Stack".center(print_width), "Postfix".center(print_width), sep=" | ", )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                # stop at "(" so the priority lookup never hits a parenthesis
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ", )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ", )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input('\nEnter an Infix Equation = ')  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
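
# Worked example: for Infix = "a+b*(c^d-e)" the conversion yields the
# Postfix form "abcd^e-*+" and the Prefix form "+a*b-^cde".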
| 121 | 1 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
def __init__( self , __magic_name__ , __magic_name__=1_3 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=False , __magic_name__=False , __magic_name__=False , __magic_name__=2 , __magic_name__=9_9 , __magic_name__=0 , __magic_name__=3_2 , __magic_name__=5 , __magic_name__=4 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=2 , __magic_name__=4 , __magic_name__="last" , __magic_name__=True , __magic_name__=None , __magic_name__=0 , ):
lowerCamelCase : List[str] = parent
lowerCamelCase : int = batch_size
lowerCamelCase : str = seq_length
lowerCamelCase : Optional[int] = is_training
lowerCamelCase : Any = use_input_lengths
lowerCamelCase : Optional[int] = use_token_type_ids
lowerCamelCase : int = use_labels
lowerCamelCase : Dict = gelu_activation
lowerCamelCase : int = sinusoidal_embeddings
lowerCamelCase : str = causal
lowerCamelCase : Union[str, Any] = asm
lowerCamelCase : Optional[Any] = n_langs
lowerCamelCase : Union[str, Any] = vocab_size
lowerCamelCase : List[str] = n_special
lowerCamelCase : Tuple = hidden_size
lowerCamelCase : Any = num_hidden_layers
lowerCamelCase : Dict = num_attention_heads
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Tuple = max_position_embeddings
lowerCamelCase : Optional[Any] = type_sequence_label_size
lowerCamelCase : List[Any] = initializer_range
lowerCamelCase : Tuple = num_labels
lowerCamelCase : List[str] = num_choices
lowerCamelCase : str = summary_type
lowerCamelCase : int = use_proj
lowerCamelCase : Dict = scope
lowerCamelCase : Optional[int] = bos_token_id
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase : Tuple = None
if self.use_input_lengths:
lowerCamelCase : List[str] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase : Optional[Any] = None
if self.use_token_type_ids:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCamelCase : Optional[Any] = None
lowerCamelCase : Optional[int] = None
lowerCamelCase : List[Any] = None
if self.use_labels:
lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , 2 ).float()
lowerCamelCase : Any = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : List[str] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase__ ( self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : Any = XLMModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = model(__a , lengths=__a , langs=__a )
lowerCamelCase : Union[str, Any] = model(__a , langs=__a )
lowerCamelCase : Optional[int] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : Optional[Any] = XLMWithLMHeadModel(__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = model(__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : List[str] = XLMForQuestionAnsweringSimple(__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a )
lowerCamelCase : Optional[int] = model(__a , start_positions=__a , end_positions=__a )
lowerCamelCase : Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : Any = XLMForQuestionAnswering(__a )
model.to(__a )
model.eval()
lowerCamelCase : Dict = model(__a )
lowerCamelCase : Optional[Any] = model(
__a , start_positions=__a , end_positions=__a , cls_index=__a , is_impossible=__a , p_mask=__a , )
lowerCamelCase : List[Any] = model(
__a , start_positions=__a , end_positions=__a , cls_index=__a , is_impossible=__a , )
        (total_loss,) = result_with_labels.to_tuple()
lowerCamelCase : Union[str, Any] = model(__a , start_positions=__a , end_positions=__a )
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : List[Any] = XLMForSequenceClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[int] = model(__a )
lowerCamelCase : Union[str, Any] = model(__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : List[Any] = self.num_labels
lowerCamelCase : Any = XLMForTokenClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[int] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : str = self.num_choices
lowerCamelCase : Union[str, Any] = XLMForMultipleChoice(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase : str = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = XLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XLMConfig , emb_dim=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_xlm_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs )
    def test_xlm_lm_head( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs )
    def test_xlm_simple_qa( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs )
    def test_xlm_qa( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs )
    def test_xlm_sequence_classif( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs )
    def test_xlm_token_classif( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs )
    def test_xlm_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs )
    def _check_attentions_for_generate( self , batch_size , attentions , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
        self.assertIsInstance(attentions , tuple )
        self.assertListEqual(
            [isinstance(iter_attentions , tuple ) for iter_attentions in attentions] , [True] * len(attentions ) )
        self.assertEqual(len(attentions ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(attentions ):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(iter_attentions ) )
    def _check_hidden_states_for_generate( self , batch_size , hidden_states , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
        self.assertIsInstance(hidden_states , tuple )
        self.assertListEqual(
            [isinstance(iter_hidden_states , tuple ) for iter_hidden_states in hidden_states] , [True] * len(hidden_states ) , )
        self.assertEqual(len(hidden_states ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(hidden_states ):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(iter_hidden_states ) , )
@slow
    def test_model_from_pretrained( self ):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class A__ ( unittest.TestCase):
@slow
    def test_lm_generate_xlm_mlm_en_2048( self ):
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
        model.to(torch_device )
        input_ids = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=torch_device ) # the president
        expected_output_ids = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , expected_output_ids )
| 287 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester ):
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , 'embed_dim' ) )
        self.parent.assertTrue(hasattr(config , 'num_heads' ) )
class TFCvtModelTester:
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , embed_dim=[16, 48, 96] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.02 , layer_norm_eps=1E-1_2 , is_training=True , use_labels=True , num_labels=2 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        '''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = TFCvtModel(config=config )
        result = model(pixel_values , training=False )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFCvtModelTester(self )
        self.config_tester = TFCvtConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='Cvt does not output attentions' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = tf.keras.mixed_precision.Policy('mixed_float16' )
tf.keras.mixed_precision.set_global_policy(__a )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('float32' )
    def test_forward_signature( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1E-4 ) )
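# A quick worked check (illustrative only) of the conv patch-embedding size
# formula floor((size + 2 * pad - kernel) / stride + 1) that
# create_and_check_model applies per stage, using this tester's defaults:
#
#     from math import floor
#     size = 64
#     for k, s, p in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
#         size = floor((size + 2 * p - k) / s + 1)
#     # stage outputs: 16, 8, 4 -> the final feature map is 4x4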
| 27 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self) -> Any:
'''simple docstring'''
        logits = tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
                ], # cumulative prob of 5 highest values <= 0.6
            ] , dtype=tf.float32 , )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.int32 , ) # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.float32 , ) # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits , top_k=10 , top_p=0.6 , min_tokens_to_keep=4)
        non_inf_output = output[output != -float('inf')]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output , tf.constant(-float('inf') , dtype=tf.float32))) , dtype=tf.int32 , )
        tf.debugging.assert_near(non_inf_output , non_inf_expected_output , rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx , non_inf_expected_idx)
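# A minimal NumPy sketch of the top-k / top-p idea exercised above; this is an
# illustration, not the transformers implementation (min_tokens_to_keep is
# ignored here): keep the top_k highest logits, keep the smallest prefix of
# them whose softmax mass reaches top_p, and mask everything else to -inf.
#
#     import numpy as np
#     def toy_top_k_top_p(logits, top_k, top_p):
#         order = np.argsort(logits)[::-1][:top_k]
#         probs = np.exp(logits[order]) / np.exp(logits[order]).sum()
#         cutoff = int(np.searchsorted(np.cumsum(probs), top_p)) + 1
#         out = np.full_like(logits, -np.inf)
#         out[order[:cutoff]] = logits[order[:cutoff]]
#         return out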
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase , GenerationIntegrationTestsMixin ):
    """simple docstring"""
    if is_tf_available():
        framework_dependent_parameters = {
            'AutoModelForCausalLM': TFAutoModelForCausalLM,
            'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeq2Seq,
            'AutoModelForSeq2SeqLM': TFAutoModelForSeq2SeqLM,
            'AutoModelForVision2Seq': TFAutoModelForVision2Seq,
            'LogitsProcessorList': TFLogitsProcessorList,
            'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor,
            'create_tensor_fn': tf.convert_to_tensor,
            'floats_tensor': floats_tensor,
            'return_tensors': 'tf',
        }
@slow
    def test_generate_tf_function_export_fixed_input_length( self ) -> Dict:
        '''simple docstring'''
        test_model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        input_length = 2
        max_new_tokens = 2
        class DummyModel(tf.Module ):
            """simple docstring"""
            def __init__( self , model ) -> Tuple:
                '''simple docstring'''
                super(DummyModel , self ).__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.int32 , name='input_ids'),
                    tf.TensorSpec((None, input_length) , tf.int32 , name='attention_mask'),
                ) , jit_compile=True , )
            def serving( self , input_ids , attention_mask ) -> Optional[int]:
                '''simple docstring'''
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={'serving_default': dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures['serving_default']
            for batch_size in range(1 , len(dummy_input_ids ) + 1 ):
                inputs = {
                    'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
                    'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
                }
                tf_func_outputs = serving_func(**inputs )['sequences']
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
    def test_generate_tf_function_export_fixed_batch_size( self ) -> Union[str, Any]:
        '''simple docstring'''
        test_model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        batch_size = 1
        max_new_tokens = 2
        class DummyModel(tf.Module ):
            """simple docstring"""
            def __init__( self , model ) -> int:
                '''simple docstring'''
                super(DummyModel , self ).__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None) , tf.int32 , name='input_ids'),
                    tf.TensorSpec((batch_size, None) , tf.int32 , name='attention_mask'),
                ) , jit_compile=True , )
            def serving( self , input_ids , attention_mask ) -> Dict:
                '''simple docstring'''
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={'serving_default': dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures['serving_default']
            for input_row in range(len(dummy_input_ids ) ):
                inputs = {
                    'input_ids': tf.constant([dummy_input_ids[input_row]] ),
                    'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
                }
                tf_func_outputs = serving_func(**inputs )['sequences']
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
@require_tensorflow_text
    def test_generate_tf_with_tensorflow_text( self ) -> Optional[int]:
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=tmp_dir )
            class CompleteSentenceTransformer(tf.keras.layers.Layer ):
                """simple docstring"""
                def __init__( self ) -> Optional[int]:
                    '''simple docstring'''
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir , 'spiece.model' ) , 'rb' ).read() )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained('hf-internal-testing/tiny-random-t5')
                def call( self , inputs , *args , **kwargs ) -> Union[str, Any]:
                    '''simple docstring'''
                    tokens = self.tokenizer.tokenize(inputs )
                    input_ids , attention_mask = text.pad_model_inputs(
                        tokens , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
                    outputs = self.model.generate(input_ids=input_ids , attention_mask=attention_mask )
                    return self.tokenizer.detokenize(outputs )
            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' )
            outputs = complete_model(inputs )
            keras_model = tf.keras.Model(inputs , outputs )
            keras_model.save(tmp_dir )
    def test_eos_token_id_int_and_list_top_k_top_sampling( self ) -> int:
        '''simple docstring'''
        generation_kwargs = {
            'do_sample': True,
            'num_beams': 1,
            'top_p': 0.7,
            'top_k': 10,
            'temperature': 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        sentence = 'Hello, my dog is cute and'
        tokens = tokenizer(sentence , return_tensors='tf')
        model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(':/CPU:0'):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens , eos_token_id=eos_token_id , **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
        eos_token_id = [638, 198]
        with tf.device(':/CPU:0'):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens , eos_token_id=eos_token_id , **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering( self ) -> Union[str, Any]:
        '''simple docstring'''
        bart_tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart')
        article = 'Hugging Face is a technology company based in New York and Paris.'
        input_ids = bart_tokenizer(article , return_tensors='tf').input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart')
        output = bart_model.generate(input_ids ).numpy()
        class FakeBart(TFBartForConditionalGeneration ):
            """simple docstring"""
            def call( self , input_ids , foo=None , **kwargs ) -> List[str]:
                '''simple docstring'''
                return super().call(input_ids , **kwargs )
        bart_model = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart')
        fake_output = bart_model.generate(input_ids , foo='bar' ).numpy()
        self.assertTrue(np.array_equal(output , fake_output ) )
        class FakeEncoder(bart_model.model.encoder.__class__ ):
            """simple docstring"""
            def call( self , input_ids , **kwargs ) -> Any:
                '''simple docstring'''
                return super().call(input_ids , **kwargs )
        fake_encoder = FakeEncoder(bart_model.config , bart_model.model.shared )
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids ).numpy()
        with self.assertRaises(ValueError ):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids , foo='bar' )
| 350 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
"""simple docstring"""
    model_type: str = field(
        default=None , metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    doc_stride: int = field(
        default=128 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
    max_query_length: int = field(
        default=64 , metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        } , )
    max_answer_length: int = field(
        default=30 , metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    version_2_with_negative: bool = field(
        default=False , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    n_best_size: int = field(
        default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    lang_id: int = field(
        default=0 , metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        } , )
    threads: int = field(default=1 , metadata={"help": "multiple threads for converting example to features"} )
class Split(Enum ):
    """simple docstring"""
    train = 'train'
    dev = 'dev'
class SquadDataset(Dataset ):
    """simple docstring"""
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self , args: SquadDataTrainingArguments , tokenizer: PreTrainedTokenizer , limit_length: Optional[int] = None , mode: Union[str, Split] = Split.train , is_language_sensitive: Optional[bool] = False , cache_dir: Optional[str] = None , dataset_format: Optional[str] = "pt" , ):
        '''simple docstring'''
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name')
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = 'v2' if args.version_2_with_negative else 'v1'
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features['features']
                self.dataset = self.old_features.get('dataset' , None )
                self.examples = self.old_features.get('examples' , None )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start)
if self.dataset is None or self.examples is None:
logger.warning(
F'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
' future run')
else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
torch.save(
{'features': self.features, 'dataset': self.dataset, 'examples': self.examples} , lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]')
def __len__( self) -> Tuple:
'''simple docstring'''
return len(self.features)
def __getitem__( self , lowercase) -> Dict[str, torch.Tensor]:
'''simple docstring'''
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask})
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible})
if self.is_language_sensitive:
            inputs.update({'langs': (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id )} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
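# A minimal usage sketch (hypothetical local paths; assumes the field defaults
# of SquadDataTrainingArguments above):
#
#     from torch.utils.data import DataLoader
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#     dataset = SquadDataset(args, tokenizer, mode=Split.dev)
#     batch = next(iter(DataLoader(dataset, batch_size=8)))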
| 225 | 0 |
"""simple docstring"""
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str ) -> int:
    """Return the product of the digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit )
    return product
def solution(n: str = N ) -> int:
    """Slide a 13-digit window over n and track the largest digit product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n ) - 13:
        if int(n[cur_index] ) >= int(substr[0] ):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product , str_eval(substr ) )
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
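# A brute-force cross-check sketch for the sliding-window solution above
# (same answer, but it scores every 13-digit window directly):
#
#     def solution_brute_force(n: str = N) -> int:
#         return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))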
| 221 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward( self , input_ids , token_type_ids , attention_mask ) -> int:
        return None
class FuncNonContiguousArgs:
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ) -> Tuple:
        return None
class OnnxExportTestCase(unittest.TestCase ):
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def snake_case__ ( self ) -> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCAmelCase ,'tf' ,12 ,**__UpperCAmelCase )
@require_torch
@slow
def snake_case__ ( self ) -> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCAmelCase ,'pt' ,12 ,**__UpperCAmelCase )
@require_torch
@slow
def snake_case__ ( self ) -> Optional[Any]:
from transformers import BertModel
        vocab_tokens = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
        with NamedTemporaryFile(mode='w+t' ) as vocab_file:
            vocab_file.write('\n'.join(vocab_tokens ) )
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab_tokens ) ) )
            model.save_pretrained(bert_save_dir )
            self._test_export(bert_save_dir ,'pt' ,12 ,tokenizer )
@require_tf
@slow
def snake_case__ ( self ) -> Optional[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
A__ = self._test_export(__UpperCAmelCase ,'tf' ,12 ,**__UpperCAmelCase )
A__ = quantize(Path(__UpperCAmelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def snake_case__ ( self ) -> str:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
A__ = self._test_export(__UpperCAmelCase ,'pt' ,12 ,**__UpperCAmelCase )
A__ = quantize(__UpperCAmelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Union[str, Any]:
try:
# Compute path
with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase )
return path
except Exception as e:
self.fail(__UpperCAmelCase )
@require_torch
@require_tokenizers
@slow
def snake_case__ ( self ) -> Optional[Any]:
from transformers import BertModel
A__ = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
A__ = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCAmelCase ,__UpperCAmelCase ,'pt' )
@require_tf
@require_tokenizers
@slow
def snake_case__ ( self ) -> Optional[Any]:
from transformers import TFBertModel
A__ = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
A__ = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCAmelCase ,__UpperCAmelCase ,'tf' )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> str:
A__ = FeatureExtractionPipeline(__UpperCAmelCase ,__UpperCAmelCase )
A__ = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
A__ , A__ , A__ , A__ = infer_shapes(__UpperCAmelCase ,__UpperCAmelCase )
# Assert all variables are present
self.assertEqual(len(__UpperCAmelCase ) ,len(__UpperCAmelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] ,__UpperCAmelCase )
self.assertSequenceEqual(variable_names[3:] ,__UpperCAmelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} )
def snake_case__ ( self ) -> Union[str, Any]:
A__ = ['input_ids', 'attention_mask', 'token_type_ids']
A__ = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
A__ , A__ = ensure_valid_input(FuncContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__UpperCAmelCase ) ,3 )
# Should have exactly the same input names
self.assertEqual(set(__UpperCAmelCase ) ,set(__UpperCAmelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__UpperCAmelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
A__ , A__ = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__UpperCAmelCase ) ,1 )
self.assertEqual(len(__UpperCAmelCase ) ,1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] ,tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] ,'input_ids' )
def snake_case__ ( self ) -> Union[str, Any]:
A__ = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
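# A minimal sketch of driving transformers.convert_graph_to_onnx directly,
# mirroring the _test_export helper above (assumes a PyTorch checkpoint and
# opset 12; the output path is illustrative):
#
#     from pathlib import Path
#     from transformers.convert_graph_to_onnx import convert
#
#     convert(framework="pt", model="bert-base-cased",
#             output=Path("onnx/bert-base-cased.onnx"), opset=12)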
| 221 | 1 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137
def lamberts_ellipsoidal_distance(lat1: float , lon1: float , lat2: float , lon2: float ) -> float:
    """simple docstring"""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
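# A small usage sketch (illustrative coordinates; inputs are decimal degrees
# and the result is in metres, like the haversine helper this builds on):
#
#     SAN_FRANCISCO = (37.774856, -122.424227)
#     YOSEMITE = (37.864742, -119.537521)
#     lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE)  # roughly 254 km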
| 366 |
from statistics import mean, stdev
def normalization(data: list , ndigits: int = 3 ) -> list:
    """simple docstring"""
    x_min = min(data )
    x_max = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]
def standardization(data: list , ndigits: int = 3 ) -> list:
    """simple docstring"""
    mu = mean(data )
    sigma = stdev(data )
    # standardize data
    return [round((x - mu) / (sigma) , ndigits ) for x in data]
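# A quick usage sketch of both helpers:
#
#     data = [2.0, 4.0, 6.0, 8.0]
#     normalization(data)    # [0.0, 0.333, 0.667, 1.0]
#     standardization(data)  # zero mean, unit (sample) standard deviation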
| 221 | 0 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(timesteps , embedding_dim , freq_shift = 1 , min_timescale = 1 , max_timescale = 1.0E4 , flip_sin_to_cos = False , scale = 1.0 , ):
    """simple docstring"""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, F'''Embedding dimension {embedding_dim} should be even'''
    num_timescales = float(embedding_dim // 2 )
    log_timescale_increment = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales , dtype=jnp.float32 ) * -log_timescale_increment )
    emb = jnp.expand_dims(timesteps , 1 ) * jnp.expand_dims(inv_timescales , 0 )
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time ), jnp.sin(scaled_time )] , axis=1 )
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time ), jnp.cos(scaled_time )] , axis=1 )
    signal = jnp.reshape(signal , [jnp.shape(scaled_time )[0], embedding_dim] )
    return signal
class FlaxTimestepEmbedding(nn.Module ):
    """simple docstring"""
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32
    @nn.compact
    def __call__( self , temb ):
        """simple docstring"""
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(temb )
        temb = nn.silu(temb )
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(temb )
        return temb
class FlaxTimesteps(nn.Module ):
    """simple docstring"""
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: int = 1
    @nn.compact
    def __call__( self , timesteps ):
        """simple docstring"""
        return get_sinusoidal_embeddings(
            timesteps , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
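# A quick shape-check sketch for get_sinusoidal_embeddings above (half of the
# channels are sine, half cosine, so embedding_dim must be even):
#
#     import jax.numpy as jnp
#     emb = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=32)
#     emb.shape  # (4, 32)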
| 303 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__lowercase : Dict = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None , metadata={"help": "The input training data file (a text file)."} )
    validation_file: Optional[str] = field(
        default=None , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    def __post_init__(self ):
        if self.train_file is not None:
            extension = self.train_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__(self , features ):
        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]['''input_ids'''] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch['''labels'''] = torch.tensor(labels , dtype=torch.int64 )
        return batch
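# A minimal usage sketch (hypothetical two-choice features; assumes the
# tokenizer can pad already-encoded inputs):
#
#     tok = AutoTokenizer.from_pretrained("bert-base-uncased")
#     feats = [{"input_ids": [[101, 102], [101, 102]],
#               "attention_mask": [[1, 1], [1, 1]], "label": 0}]
#     batch = DataCollatorForMultipleChoice(tokenizer=tok)(feats)
#     # batch["input_ids"].shape -> (1, 2, padded_len)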
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , _lowercase , _lowercase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
logger.setLevel(_lowercase )
datasets.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files['''train'''] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['''validation'''] = data_args.validation_file
        extension = data_args.train_file.split('''.''' )[-1]
        raw_datasets = load_dataset(
            extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            '''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [F"""ending{i}""" for i in range(4 )]
    context_name = '''sent1'''
    question_header_name = '''sent2'''
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1_024:
            logger.warning(
                '''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
                ''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
                ''' override this default with `--block_size xxx`.''' )
            max_seq_length = 1_024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
                F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding='''max_length''' if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
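    # Sketch of the regrouping above (hypothetical values, not part of the original
    # script): with 4 endings per example, B examples become 4 * B flattened rows,
    # and slicing in strides of 4 restores one list of 4 encodings per example:
    #   rows = ["e0a", "e0b", "e0c", "e0d", "e1a", "e1b", "e1c", "e1d"]
    #   [rows[i : i + 4] for i in range(0, len(rows), 4)]
    #   == [["e0a", "e0b", "e0c", "e0d"], ["e1a", "e1b", "e1c", "e1d"]]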
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
        train_dataset = raw_datasets['''train''']
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
        eval_dataset = raw_datasets['''validation''']
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
    )
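    # Padding to a multiple of 8 when fp16 is enabled keeps dynamically padded batches
    # aligned for NVIDIA tensor cores; with --pad_to_max_length the default collator is
    # enough because every row already has the same length.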
# Metric
    def compute_metrics(eval_predictions ):
        predictions , label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['''train_samples'''] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics('''train''' , metrics )
        trainer.save_metrics('''train''' , metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['''eval_samples'''] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('''eval''' , metrics )
        trainer.save_metrics('''eval''' , metrics )
    kwargs = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    # For xla_spawn (TPUs): each spawned process simply runs `main()`.
    main()
if __name__ == "__main__":
main()
| 318 | 0 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def UpperCamelCase__ ( self , lowerCAmelCase__=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = np.random.RandomState(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
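    # The seeded RandomState above makes every test in this class deterministic, so the
    # hard-coded expected slices below can be compared against reproducible outputs.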
    def test_pipeline_default_ddim(self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_pipeline_pndm(self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_pipeline_lms(self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_pipeline_euler(self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_pipeline_euler_ancestral(self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_pipeline_dpm_multistep(self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_prompt_embeds(self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        inputs['prompt'] = 3 * [inputs['prompt']]
        # forward
        output = pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('prompt' )]
        text_inputs = pipe.tokenizer(
            prompt , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=True , return_tensors='np' , )
        text_inputs = text_inputs['input_ids']
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
        inputs['prompt_embeds'] = prompt_embeds
        # forward
        output = pipe(**inputs )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1E-4
    def test_stable_diffusion_negative_prompt_embeds(self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]
        # forward
        output = pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('prompt' )]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=True , return_tensors='np' , )
            text_inputs = text_inputs['input_ids']
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
        inputs['prompt_embeds'] , inputs['negative_prompt_embeds'] = embeds
        # forward
        output = pipe(**inputs )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase ):
"""simple docstring"""
@property
    def gpu_provider(self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self ):
        """simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
return options
    def test_inference_default_pndm(self ):
        """simple docstring"""
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger'
        np.random.seed(0 )
        output = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=1_0 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_inference_ddim(self ):
        """simple docstring"""
        ddim_scheduler = DDIMScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=ddim_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = 'open neural network exchange'
        generator = np.random.RandomState(0 )
        output = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=generator , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_inference_k_lms(self ):
        """simple docstring"""
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = 'open neural network exchange'
        generator = np.random.RandomState(0 )
        output = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=generator , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_intermediate_state(self ):
        """simple docstring"""
        number_of_steps = 0
        def test_callback_fn(step , timestep , latents ) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 6_4, 6_4)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
            elif step == 5:
                assert latents.shape == (1, 4, 6_4, 6_4)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
        test_callback_fn.has_been_called = False
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'Andromeda galaxy in a bottle'
        generator = np.random.RandomState(0 )
        pipe(
            prompt=prompt , num_inference_steps=5 , guidance_scale=7.5 , generator=generator , callback=test_callback_fn , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        assert isinstance(pipe , OnnxStableDiffusionPipeline )
        assert pipe.safety_checker is None
        image = pipe('example prompt' , num_inference_steps=2 ).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname )
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe('example prompt' , num_inference_steps=2 ).images[0]
        assert image is not None
| 368 |
from __future__ import annotations
class XORCipher:
    """simple docstring"""
    def __init__( self , key = 0 ):
        """simple docstring"""
        self.__key = key
    def encrypt( self , content , key ):
        """simple docstring"""
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 2_5_5
        return [chr(ord(ch ) ^ key ) for ch in content]
    def decrypt( self , content , key ):
        """simple docstring"""
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 2_5_5
        return [chr(ord(ch ) ^ key ) for ch in content]
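    # Note: `encrypt` and `decrypt` are deliberately identical, because XOR is its own
    # inverse: (x ^ key) ^ key == x for any key.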
    def encrypt_string( self , content , key = 0 ):
        """simple docstring"""
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 2_5_5:
            key -= 2_5_5
        # This will be returned
        ans = ''
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def decrypt_string( self , content , key = 0 ):
        """simple docstring"""
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 2_5_5:
            key -= 2_5_5
        # This will be returned
        ans = ''
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def encrypt_file( self , file , key = 0 ):
        """simple docstring"""
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open('encrypt.out' , 'w+' ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
        except OSError:
            return False
        return True
    def decrypt_file( self , file , key ):
        """simple docstring"""
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open('decrypt.out' , 'w+' ) as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 162 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mask2former'] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 308 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config , base_model=False ):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
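# timm checkpoints store attention as one fused qkv projection of shape
# (3 * hidden_size, hidden_size); the three row slices above become the separate
# query, key and value weights that the Hugging Face ViT layout expects.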
def remove_classification_head_(state_dict ):
    '''simple docstring'''
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head(state_dict ):
    '''simple docstring'''
    ignore_keys = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = ViTMSNConfig()
    config.num_labels = 10_00
    repo_id = '''datasets/huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if "s16" in checkpoint_url:
lowercase : int = 3_84
lowercase : Optional[Any] = 15_36
lowercase : Tuple = 6
elif "l16" in checkpoint_url:
lowercase : Union[str, Any] = 10_24
lowercase : List[str] = 40_96
lowercase : int = 24
lowercase : Union[str, Any] = 16
lowercase : Tuple = 0.1
elif "b4" in checkpoint_url:
lowercase : Union[str, Any] = 4
elif "l7" in checkpoint_url:
lowercase : Dict = 7
lowercase : List[Any] = 10_24
lowercase : str = 40_96
lowercase : int = 24
lowercase : Dict = 16
lowercase : Tuple = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''target_encoder''']
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowercase : List[str] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowercase : Any = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowercase : Dict = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowercase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowercase : Optional[int] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 308 | 1 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr , low , high ):
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low , left_high , left_sum = max_subarray(arr , low , mid )
    right_low , right_high , right_sum = max_subarray(arr , mid + 1 , high )
    cross_left , cross_right , cross_sum = max_cross_sum(arr , low , mid , high )
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
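# The best subarray lies entirely in the left half, entirely in the right half, or
# crosses `mid`; the recurrence T(n) = 2 * T(n / 2) + O(n) gives the O(n log n) bound.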
def max_cross_sum(arr , low , mid , high ):
    left_sum , max_left = float('-inf' ), -1
    right_sum , max_right = float('-inf' ), -1
    summ = 0
    for i in range(mid , low - 1 , -1 ):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1 , high + 1 ):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray(input_size ):
    arr = [randint(1 , input_size ) for _ in range(input_size )]
    start = time.time()
    max_subarray(arr , 0 , input_size - 1 )
    end = time.time()
    return end - start
def benchmark():
    input_sizes = [10, 1_00, 10_00, 1_00_00, 5_00_00, 10_00_00, 20_00_00, 30_00_00, 40_00_00, 50_00_00]
    runtimes = [time_max_subarray(input_size ) for input_size in input_sizes]
    print('No of Inputs\t\tTime Taken' )
    for input_size, runtime in zip(input_sizes , runtimes ):
        print(input_size , '\t\t' , runtime )
    plt.plot(input_sizes , runtimes )
    plt.xlabel('Number of Inputs' )
    plt.ylabel('Time taken in seconds' )
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 356 |
def snake_to_camel_case(input_str , use_pascal = False ):
    if not isinstance(input_str , str ):
        msg = f"""Expected string as input, found {type(input_str )}"""
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = f"""Expected boolean as use_pascal parameter, found {type(use_pascal )}"""
        raise ValueError(msg )
    words = input_str.split('_' )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 88 | 0 |
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _UpperCAmelCase :
    @property
    def dummy_input(self ):
        return self.get_dummy_input()
    @property
    def output_shape(self ):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
    def get_dummy_input(self , include_temb=True , include_res_hidden_states_tuple=False , include_encoder_hidden_states=False , include_skip_sample=False , ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0 )
        device = torch.device(torch_device )
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape , generator=generator , device=device )
        dummy_input = {'''hidden_states''': hidden_states}
        if include_temb:
            temb_channels = 1_28
            dummy_input['''temb'''] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device )
        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1 )
            dummy_input['''res_hidden_states_tuple'''] = (randn_tensor(shape , generator=generator_1 , device=device ),)
        if include_encoder_hidden_states:
            dummy_input['''encoder_hidden_states'''] = floats_tensor((batch_size, 32, 32) ).to(torch_device )
        if include_skip_sample:
            dummy_input['''skip_sample'''] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device )
        return dummy_input
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            '''in_channels''': 32,
            '''out_channels''': 32,
            '''temb_channels''': 1_28,
        }
        if self.block_type == "up":
            init_dict['''prev_output_channel'''] = 32
        if self.block_type == "mid":
            init_dict.pop('''out_channels''' )
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self , expected_slice ):
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict )
        unet_block.to(torch_device )
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict )
        if isinstance(output , tuple ):
            output = output[0]
        self.assertEqual(output.shape , self.output_shape )
        output_slice = output[0, -1, -3:, -3:]
        expected_output_slice = torch.tensor(expected_slice ).to(torch_device )
        assert torch_all_close(output_slice.flatten() , expected_output_slice , atol=5E-3 )
    @unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
    def test_training(self ):
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict )
        model.to(torch_device )
        model.train()
        output = model(**inputs_dict )
        if isinstance(output , tuple ):
            output = output[0]
        device = torch.device(torch_device )
        noise = randn_tensor(output.shape , device=device )
        loss = torch.nn.functional.mse_loss(output , noise )
        loss.backward()
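        # A successful backward pass shows the block is differentiable end-to-end and
        # that gradients reach every parameter.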
| 332 |
'''simple docstring'''
def solution(max_base = 10 , max_power = 22 ):
    """simple docstring"""
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
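# For example, 16807 = 7**5 has exactly 5 digits, so it is counted; any base >= 10 is
# never counted because base**power then has more than `power` digits.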
if __name__ == "__main__":
print(f'{solution(10, 22) = }')
| 104 | 0 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject ):
    _backends = ['note_seq']
    def __init__(self , *args , **kwargs ):
        requires_backends(self , ['note_seq'] )
    @classmethod
    def from_config(cls , *args , **kwargs ):
        requires_backends(cls , ['note_seq'] )
    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        requires_backends(cls , ['note_seq'] )
| 361 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key , default=False ):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""" )
    return _value
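# Example (assumed invocation): `RUN_SLOW=yes python -m pytest ...` makes the flag
# below True; a value that `strtobool` cannot parse raises the ValueError above.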
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
def skip(test_case ):
    return unittest.skip('Test was skipped' )(test_case )
def slow(test_case ):
    return unittest.skipUnless(_run_slow_tests , 'test is slow' )(test_case )
def require_cpu(test_case ):
    return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(test_case )
def require_cuda(test_case ):
    return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(test_case )
def require_xpu(test_case ):
    return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(test_case )
def require_mps(test_case ):
    return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(test_case )
def require_huggingface_suite(test_case ):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(test_case )
def require_bnb(test_case ):
    return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(test_case )
def require_tpu(test_case ):
    return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(test_case )
def require_single_gpu(test_case ):
    return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(test_case )
def require_single_xpu(test_case ):
    return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(test_case )
def require_multi_gpu(test_case ):
    return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(test_case )
def require_multi_xpu(test_case ):
    return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(test_case )
def require_safetensors(test_case ):
    return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(test_case )
def require_deepspeed(test_case ):
    return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(test_case )
def require_fsdp(test_case ):
    return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(test_case )
def require_torch_min_version(test_case=None , version=None ):
    if test_case is None:
        return partial(require_torch_min_version , version=version )
    return unittest.skipUnless(is_torch_version('>=' , version ) , F"""test requires torch version >= {version}""" )(test_case )
def require_tensorboard(test_case ):
    return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(test_case )
def require_wandb(test_case ):
    return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(test_case )
def require_comet_ml(test_case ):
    return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(test_case )
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case ):
    return unittest.skipUnless(
        _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(test_case )
class TempDirTestCase(unittest.TestCase ):
    clear_on_setup = True
    @classmethod
    def setUpClass(cls ):
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass(cls ):
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )
    def setUp(self ):
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob('**/*' ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path )
class AccelerateTestCase(unittest.TestCase ):
    def tearDown(self ):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase ):
    def add_mocks(self , mocks ):
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def are_the_same_tensors(tensor ):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput:
    def __init__(self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream , callback ):
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    if echo:
        print('\nRunning: ' , ' '.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l: tee(l , out , sys.stdout , label='stdout:' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l: tee(l , err , sys.stderr , label='stderr:' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""" )
    return result
class SubprocessCallException(Exception ):
    pass
def run_command(command , return_stdout=False ):
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , 'decode' ):
                output = output.decode('utf-8' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"""Command `{" ".join(command )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
| 103 | 0 |
'''simple docstring'''
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict ):
    """simple docstring"""
    ignore_keys = [
        """decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb ):
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
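# The fairseq checkpoint ties input and output embeddings, so the output projection is
# rebuilt as a bias-free Linear layer sharing the token-embedding weights.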
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path ):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    args = Namespace(**checkpoint["""cfg"""]["""model"""] )
    state_dict = checkpoint["""model"""]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["""decoder.embed_tokens.weight"""].shape[0]
    state_dict = {key.replace("""decoder""" , """model""" ): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""gelu""" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    model = XGLMForCausalLM(config )
    missing_keys = model.load_state_dict(state_dict , strict=False )
    print(missing_keys )
    model.lm_head = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
__snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__snake_case : Any = parser.parse_args()
__snake_case : List[Any] = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 134 |
'''simple docstring'''
INSTALL_CONTENT = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 134 | 1 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCAmelCase : Any = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 128,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.0_1),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
    def setUpClass(cls ):
        """simple docstring"""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass(cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
    def test_push_to_hub(self ):
"""simple docstring"""
        config = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(f'{USER}/test-config' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , repo_id="""test-config""" , push_to_hub=True , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(f'{USER}/test-config' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
lowerCamelCase = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a , repo_id="""valid_org/test-config-org""" , push_to_hub=_a , use_auth_token=self._token )
lowerCamelCase = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
CustomConfig.register_for_auto_class()
lowerCamelCase = CustomConfig(attribute=42 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
lowerCamelCase = AutoConfig.from_pretrained(f'{USER}/test-dynamic-config' , trust_remote_code=_a )
# Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 42 )
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase = c.n_embd + 1 # int
lowerCamelCase = c.resid_pdrop + 1.0 # float
lowerCamelCase = not c.scale_attn_weights # bool
lowerCamelCase = c.summary_type + """foo""" # str
c.update_from_string(
f'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
self.assertEqual(_a , c.n_embd , """mismatch for key: n_embd""" )
self.assertEqual(_a , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
self.assertEqual(_a , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
self.assertEqual(_a , c.summary_type , """mismatch for key: summary_type""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = PretrainedConfig()
lowerCamelCase = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_a , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
lowerCamelCase = [key for key, value in config_common_kwargs.items() if value == getattr(_a , _a )]
if len(_a ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
f' {", ".join(_a )}.' )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with self.assertRaises(_a ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
lowerCamelCase = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
self.assertIsNotNone(_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
lowerCamelCase = mock.Mock()
lowerCamelCase = 500
lowerCamelCase = {}
lowerCamelCase = HTTPError
lowerCamelCase = {}
# Download this model to make sure it's in the cache.
lowerCamelCase = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=_a ) as mock_head:
lowerCamelCase = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This checks that we did call the fake head request
mock_head.assert_called()
def _lowerCAmelCase ( self ):
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
lowerCamelCase = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = AutoConfig.from_pretrained("""bert-base-cased""" )
lowerCamelCase = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_a )
lowerCamelCase = 2
json.dump(configuration.to_dict() , open(os.path.join(_a , """config.4.0.0.json""" ) , """w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase = ["""config.42.0.0.json"""]
lowerCamelCase = 768
configuration.save_pretrained(_a )
shutil.move(os.path.join(_a , """config.4.0.0.json""" ) , os.path.join(_a , """config.42.0.0.json""" ) )
lowerCamelCase = AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size , 768 )
def _lowerCAmelCase ( self ):
"""simple docstring"""
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
lowerCamelCase = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
lowerCamelCase = """v4.0.0"""
lowerCamelCase , lowerCamelCase = new_transformers.models.auto.AutoConfig.from_pretrained(
_a , return_unused_kwargs=_a )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_a , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase = """v3.0.0"""
lowerCamelCase = old_transformers.models.auto.AutoConfig.from_pretrained(_a )
self.assertEqual(old_configuration.hidden_size , 768 )
| 168 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __magic_name__ :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ):
"""simple docstring"""
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = seq_length
lowerCamelCase = is_training
lowerCamelCase = use_input_mask
lowerCamelCase = use_token_type_ids
lowerCamelCase = use_labels
lowerCamelCase = vocab_size
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_act
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = max_position_embeddings
lowerCamelCase = type_vocab_size
lowerCamelCase = type_sequence_label_size
lowerCamelCase = initializer_range
lowerCamelCase = num_labels
lowerCamelCase = num_choices
lowerCamelCase = scope
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase = None
if self.use_input_mask:
lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase = None
if self.use_token_type_ids:
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = None
if self.use_labels:
lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , use_stable_embedding=_a , )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
"""simple docstring"""
lowerCamelCase = OpenLlamaModel(config=_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a , attention_mask=_a )
lowerCamelCase = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
"""simple docstring"""
lowerCamelCase = True
lowerCamelCase = OpenLlamaModel(_a )
model.to(_a )
model.eval()
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , )
lowerCamelCase = model(_a , attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
"""simple docstring"""
lowerCamelCase = OpenLlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
"""simple docstring"""
lowerCamelCase = True
lowerCamelCase = True
lowerCamelCase = OpenLlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , use_cache=_a , )
lowerCamelCase = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , output_hidden_states=_a , )["""hidden_states"""][0]
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , past_key_values=_a , output_hidden_states=_a , )["""hidden_states"""][0]
# select random slice
lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs
lowerCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCamelCase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = OpenLlamaModelTester(self )
lowerCamelCase = ConfigTester(self , config_class=_a , hidden_size=37 )
def _lowerCAmelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase = type
self.model_tester.create_and_check_model(*_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = 3
lowerCamelCase = input_dict["""input_ids"""]
lowerCamelCase = input_ids.ne(1 ).to(_a )
lowerCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase = OpenLlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = 3
lowerCamelCase = """single_label_classification"""
lowerCamelCase = input_dict["""input_ids"""]
lowerCamelCase = input_ids.ne(1 ).to(_a )
lowerCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase = OpenLlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = 3
lowerCamelCase = """multi_label_classification"""
lowerCamelCase = input_dict["""input_ids"""]
lowerCamelCase = input_ids.ne(1 ).to(_a )
lowerCamelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCamelCase = OpenLlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = ids_tensor([1, 10] , config.vocab_size )
lowerCamelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase = OpenLlamaModel(_a )
original_model.to(_a )
original_model.eval()
lowerCamelCase = original_model(_a ).last_hidden_state
lowerCamelCase = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase = {"""type""": scaling_type, """factor""": 10.0}
lowerCamelCase = OpenLlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
lowerCamelCase = scaled_model(_a ).last_hidden_state
lowerCamelCase = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a , _a , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a , _a , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a , _a , atol=1e-5 ) )
| 168 | 1 |
"""simple docstring"""
import argparse
import os
import re
_lowercase : Optional[Any] = "src/transformers"
# Pattern that looks at the indentation in a line.
_lowercase : List[str] = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_lowercase : Union[str, Any] = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_lowercase : Optional[int] = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_lowercase : Any = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_lowercase : Any = re.compile(r"\[([^\]]+)\]")
def snake_case__ ( __lowerCamelCase : List[Any] ):
"""simple docstring"""
lowerCamelCase__ : List[str] =_re_indent.search(__lowerCamelCase )
return "" if search is None else search.groups()[0]
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : Tuple="" , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[Any]=None ):
"""simple docstring"""
lowerCamelCase__ : str =0
lowerCamelCase__ : Optional[int] =code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(__lowerCamelCase ):
index += 1
lowerCamelCase__ : Optional[Any] =['''\n'''.join(lines[:index] )]
else:
lowerCamelCase__ : Any =[]
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCamelCase__ : Union[str, Any] =[lines[index]]
index += 1
while index < len(__lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(__lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(__lowerCamelCase ) )
if index < len(__lowerCamelCase ) - 1:
lowerCamelCase__ : Dict =[lines[index + 1]]
index += 1
else:
lowerCamelCase__ : Union[str, Any] =[]
else:
blocks.append('''\n'''.join(__lowerCamelCase ) )
lowerCamelCase__ : Optional[int] =[lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__lowerCamelCase ) > 0:
blocks.append('''\n'''.join(__lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__lowerCamelCase ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def snake_case__ ( key : Optional[int] ):
"""simple docstring"""
def _inner(__lowerCamelCase : List[Any] ):
return key(__lowerCamelCase ).lower().replace('''_''' , '''''' )
return _inner
def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : Union[str, Any]=None ):
"""simple docstring"""
# If no key is provided, we use a noop.
def noop(x : Tuple ):
return x
if key is None:
lowerCamelCase__ : List[Any] =noop
# Constants are all uppercase, they go first.
lowerCamelCase__ : List[Any] =[obj for obj in objects if key(__lowerCamelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCamelCase__ : int =[obj for obj in objects if key(__lowerCamelCase )[0].isupper() and not key(__lowerCamelCase ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCamelCase__ : int =[obj for obj in objects if not key(__lowerCamelCase )[0].isupper()]
lowerCamelCase__ : Optional[int] =ignore_underscore(__lowerCamelCase )
return sorted(__lowerCamelCase , key=__lowerCamelCase ) + sorted(__lowerCamelCase , key=__lowerCamelCase ) + sorted(__lowerCamelCase , key=__lowerCamelCase )
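# e.g. sort_objects(["albert_model", "AlbertConfig", "ALBERT_CONSTANT"]) (a hypothetical input)
# returns ["ALBERT_CONSTANT", "AlbertConfig", "albert_model"]: constants, then classes, then functions.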
def snake_case__ ( __lowerCamelCase : Optional[int] ):
"""simple docstring"""
# This inner function sorts imports between [ ].
def _replace(__lowerCamelCase : Dict ):
lowerCamelCase__ : List[str] =match.groups()[0]
if "," not in imports:
return f'''[{imports}]'''
lowerCamelCase__ : Union[str, Any] =[part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCamelCase__ : Dict =keys[:-1]
return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(__lowerCamelCase )] ) + "]"
lowerCamelCase__ : Dict =import_statement.split('''\n''' )
if len(__lowerCamelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCamelCase__ : Optional[Any] =2 if lines[1].strip() == '''[''' else 1
lowerCamelCase__ : Union[str, Any] =[(i, _re_strip_line.search(__lowerCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCamelCase__ : str =sort_objects(__lowerCamelCase , key=lambda x : x[1] )
lowerCamelCase__ : int =[lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__lowerCamelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCamelCase__ : Any =_re_bracket_content.sub(_replace , lines[1] )
else:
lowerCamelCase__ : str =[part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCamelCase__ : Tuple =keys[:-1]
lowerCamelCase__ : str =get_indent(lines[1] ) + ''', '''.join([f'''"{k}"''' for k in sort_objects(__lowerCamelCase )] )
return "\n".join(__lowerCamelCase )
else:
# Finally we have to deal with imports fitting on one line
lowerCamelCase__ : Dict =_re_bracket_content.sub(_replace , __lowerCamelCase )
return import_statement
def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any]=True ):
"""simple docstring"""
with open(__lowerCamelCase , encoding='''utf-8''' ) as f:
lowerCamelCase__ : Tuple =f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCamelCase__ : Optional[Any] =split_code_in_indented_blocks(
__lowerCamelCase , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(__lowerCamelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCamelCase__ : Optional[int] =main_blocks[block_idx]
lowerCamelCase__ : int =block.split('''\n''' )
# Get to the start of the imports.
lowerCamelCase__ : Union[str, Any] =0
while line_idx < len(__lowerCamelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCamelCase__ : Tuple =len(__lowerCamelCase )
else:
line_idx += 1
if line_idx >= len(__lowerCamelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCamelCase__ : Tuple ='''\n'''.join(block_lines[line_idx:-1] )
lowerCamelCase__ : Dict =get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
lowerCamelCase__ : List[str] =split_code_in_indented_blocks(__lowerCamelCase , indent_level=__lowerCamelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCamelCase__ : Tuple =_re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCamelCase__ : Any =[(pattern.search(__lowerCamelCase ).groups()[0] if pattern.search(__lowerCamelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCamelCase__ : Any =[(i, key) for i, key in enumerate(__lowerCamelCase ) if key is not None]
lowerCamelCase__ : Tuple =[x[0] for x in sorted(__lowerCamelCase , key=lambda x : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCamelCase__ : Tuple =0
lowerCamelCase__ : int =[]
for i in range(len(__lowerCamelCase ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowerCamelCase__ : Union[str, Any] =sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(__lowerCamelCase )
count += 1
# And we put our main block back together with its first and last line.
lowerCamelCase__ : Tuple ='''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(__lowerCamelCase ):
if check_only:
return True
else:
print(f'''Overwriting {file}.''' )
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(__lowerCamelCase ) )
def snake_case__ ( __lowerCamelCase : List[str]=True ):
"""simple docstring"""
lowerCamelCase__ : Any =[]
for root, _, files in os.walk(__lowerCamelCase ):
if "__init__.py" in files:
lowerCamelCase__ : List[str] =sort_imports(os.path.join(__lowerCamelCase , '''__init__.py''' ) , check_only=__lowerCamelCase )
if result:
lowerCamelCase__ : Tuple =[os.path.join(__lowerCamelCase , '''__init__.py''' )]
if len(__lowerCamelCase ) > 0:
raise ValueError(f'''Would overwrite {len(__lowerCamelCase )} files, run `make style`.''' )
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
_lowercase : str = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 238 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[int] = "▁"
_lowercase : Optional[Any] = {"vocab_file": "spiece.model"}
_lowercase : Optional[Any] = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
_lowercase : Tuple = {
"google/pegasus-xsum": 5_1_2,
}
_lowercase : Optional[int] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ['input_ids', 'attention_mask']
def __init__( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Any="<pad>", lowerCamelCase : Optional[Any]="</s>", lowerCamelCase : Any="<unk>", lowerCamelCase : Tuple="<mask_2>", lowerCamelCase : int="<mask_1>", lowerCamelCase : Optional[Any]=None, lowerCamelCase : Dict=103, lowerCamelCase : Optional[Dict[str, Any]] = None, **lowerCamelCase : Optional[int], )-> None:
lowerCamelCase__ : Union[str, Any] =offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase, lowerCamelCase ):
raise TypeError(
F'''additional_special_tokens should be of type {type(lowerCamelCase )}, but is'''
F''' {type(lowerCamelCase )}''' )
lowerCamelCase__ : Any =(
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(lowerCamelCase ), self.offset - 1 )
]
if len(set(lowerCamelCase ) ) != len(lowerCamelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
lowerCamelCase__ : Optional[Any] =additional_special_tokens_extended
else:
lowerCamelCase__ : Tuple =[mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2, self.offset )]
lowerCamelCase__ : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCamelCase, unk_token=lowerCamelCase, mask_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token_sent=lowerCamelCase, offset=lowerCamelCase, additional_special_tokens=lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase, )
lowerCamelCase__ : Optional[int] =mask_token_sent
lowerCamelCase__ : Optional[Any] =vocab_file
lowerCamelCase__ : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase )
# add special tokens to encoder dict
lowerCamelCase__ : Dict[int, str] ={
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1 )} )
lowerCamelCase__ : Dict[str, int] ={v: k for k, v in self.encoder.items()}
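# Net effect of the two tables above (with the default offset of 103): id 0 -> pad, 1 -> eos,
# 2 -> mask_token_sent, 3 -> mask_token, 4..104 -> the <unk_x> fillers, while every regular
# sentencepiece piece sits at its sp_model id shifted up by `offset`.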
@property
def snake_case ( self : Union[str, Any] )-> int:
return len(self.sp_model ) + self.offset
def snake_case ( self : Optional[Any] )-> Dict[str, int]:
lowerCamelCase__ : List[Any] ={self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str )-> List[Any]:
lowerCamelCase__ : Optional[Any] =self.__dict__.copy()
lowerCamelCase__ : Optional[int] =None
return state
def __setstate__( self : Dict, lowerCamelCase : int )-> Optional[Any]:
lowerCamelCase__ : Optional[int] =d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowerCamelCase__ : str ={}
lowerCamelCase__ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case ( self : Any, lowerCamelCase : str )-> List[str]:
return self.sp_model.encode(lowerCamelCase, out_type=lowerCamelCase )
def snake_case ( self : int, lowerCamelCase : str )-> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowerCamelCase__ : Any =self.sp_model.piece_to_id(lowerCamelCase )
return sp_id + self.offset
def snake_case ( self : Tuple, lowerCamelCase : int )-> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowerCamelCase__ : Any =self.sp_model.IdToPiece(index - self.offset )
return token
def snake_case ( self : List[Any], lowerCamelCase : Optional[int] )-> Any:
lowerCamelCase__ : Optional[int] =[]
lowerCamelCase__ : Tuple =''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCamelCase ) + token
lowerCamelCase__ : str =[]
else:
current_sub_tokens.append(lowerCamelCase )
out_string += self.sp_model.decode(lowerCamelCase )
return out_string.strip()
def snake_case ( self : Union[str, Any], lowerCamelCase : Union[str, Any]=False )-> List[str]:
return 1
def snake_case ( self : Tuple, lowerCamelCase : Optional[int] )-> Tuple:
lowerCamelCase__ : Tuple =set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def snake_case ( self : Any, lowerCamelCase : List, lowerCamelCase : Optional[List] = None, lowerCamelCase : bool = False )-> List[int]:
if already_has_special_tokens:
return self._special_token_mask(lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def snake_case ( self : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Optional[int]=None )-> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def snake_case ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : List[str] =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase, '''wb''' ) as fi:
lowerCamelCase__ : int =self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
| 238 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare that with taking a full-size model and reducing its layers and emb
# dimensions to the minimum while keeping the full vocab + merges files, which still leaves ~3MB in total.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
snake_case_ = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
snake_case_ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
snake_case_ = dict(zip(vocab, range(len(vocab))))
snake_case_ = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
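# merges fixture in the fairseq BPE format: the two symbols of each merge rule followed by a training
# count (the count is presumably dropped when the tokenizer loads the file); the trailing '' keeps the final newline.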
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ = Path(tmpdirname)
snake_case_ = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
snake_case_ = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
snake_case_ = build_dir / VOCAB_FILES_NAMES['merges_file']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
snake_case_ = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
snake_case_ = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
snake_case_ = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
snake_case_ = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
snake_case_ = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 355 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 216 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
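# Standard transformers lazy-import layout: names declared in the structure below are only
# materialized on first attribute access through the `_LazyModule` at the bottom, so the optional
# torch backend is never imported eagerly.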
UpperCamelCase = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 87 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 243 | 0 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __UpperCAmelCase ( unittest.TestCase ):
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : Any = '''hf-internal-testing/tiny-random-t5'''
UpperCAmelCase : str = AutoTokenizer.from_pretrained(__A )
UpperCAmelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(__A )
UpperCAmelCase : List[Any] = tokenizer('''This is me''', return_tensors='''pt''' )
UpperCAmelCase : Optional[int] = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
UpperCAmelCase : List[str] = model.generate(**__A )
UpperCAmelCase : Optional[Any] = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
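# After reverting, saving must yield a vanilla checkpoint: reload it and check that the
# generations match the ones produced before the round trip.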
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
UpperCAmelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(__A )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
UpperCAmelCase : Optional[int] = model_reloaded.generate(**__A )
self.assertTrue(torch.allclose(__A, __A ) )
def __magic_name__ ( self : Any ):
UpperCAmelCase : List[Any] = '''hf-internal-testing/tiny-random-t5'''
UpperCAmelCase : List[str] = AutoModelForSeqaSeqLM.from_pretrained(__A )
UpperCAmelCase : int = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__A ):
model.save_pretrained(__A )
UpperCAmelCase : Tuple = model.reverse_bettertransformer()
model.save_pretrained(__A )
| 355 |
from __future__ import annotations
import queue
class __UpperCAmelCase :
def __init__( self : str, __A : Union[str, Any] ):
UpperCAmelCase : Dict = data
UpperCAmelCase : Tuple = None
UpperCAmelCase : Any = None
def a__ ( ) -> TreeNode:
print('''\n********Press N to stop entering at any point of time********\n''' )
UpperCAmelCase : Any = input('''Enter the value of the root node: ''' ).strip().lower()
UpperCAmelCase : queue.Queue = queue.Queue()
UpperCAmelCase : Tuple = TreeNode(int(UpperCAmelCase ) )
q.put(UpperCAmelCase )
while not q.empty():
UpperCAmelCase : int = q.get()
UpperCAmelCase : Union[str, Any] = f'''Enter the left node of {node_found.data}: '''
UpperCAmelCase : List[Any] = input(UpperCAmelCase ).strip().lower() or '''n'''
if check == "n":
return tree_node
UpperCAmelCase : List[str] = TreeNode(int(UpperCAmelCase ) )
UpperCAmelCase : List[str] = left_node
q.put(UpperCAmelCase )
UpperCAmelCase : List[Any] = f'''Enter the right node of {node_found.data}: '''
UpperCAmelCase : List[Any] = input(UpperCAmelCase ).strip().lower() or '''n'''
if check == "n":
return tree_node
UpperCAmelCase : Dict = TreeNode(int(UpperCAmelCase ) )
UpperCAmelCase : Dict = right_node
q.put(UpperCAmelCase )
raise  # unreachable in practice: the loop returns as soon as the user enters "n"
def a__ ( UpperCAmelCase : TreeNode ) -> None:
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not node:
return
print(node.data , end=''',''' )
pre_order(node.left )
pre_order(node.right )
def a__ ( UpperCAmelCase : TreeNode ) -> None:
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not node:
return
in_order(node.left )
print(node.data , end=''',''' )
in_order(node.right )
def a__ ( UpperCAmelCase : TreeNode ) -> None:
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=''',''' )
def a__ ( UpperCAmelCase : TreeNode ) -> None:
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not node:
return
UpperCAmelCase : queue.Queue = queue.Queue()
q.put(UpperCAmelCase )
while not q.empty():
UpperCAmelCase : List[Any] = q.get()
print(node_dequeued.data , end=''',''' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def a__ ( UpperCAmelCase : TreeNode ) -> None:
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not node:
return
UpperCAmelCase : queue.Queue = queue.Queue()
q.put(UpperCAmelCase )
while not q.empty():
UpperCAmelCase : int = []
while not q.empty():
UpperCAmelCase : List[str] = q.get()
print(node_dequeued.data , end=''',''' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(UpperCAmelCase )
def a__ ( UpperCAmelCase : TreeNode ) -> None:
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not node:
return
UpperCAmelCase : list[TreeNode] = []
UpperCAmelCase : List[str] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=''',''' )
stack.append(UpperCAmelCase )
UpperCAmelCase : Dict = n.left
# end of while means current node doesn't have left child
UpperCAmelCase : Union[str, Any] = stack.pop()
# start to traverse its right child
UpperCAmelCase : List[str] = n.right
def a__ ( UpperCAmelCase : TreeNode ) -> None:
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not node:
return
UpperCAmelCase : list[TreeNode] = []
UpperCAmelCase : Any = node
while n or stack:
while n:
stack.append(UpperCAmelCase )
UpperCAmelCase : Dict = n.left
UpperCAmelCase : Optional[int] = stack.pop()
print(n.data , end=''',''' )
UpperCAmelCase : Any = n.right
def a__ ( UpperCAmelCase : TreeNode ) -> None:
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not node:
return
UpperCAmelCase , UpperCAmelCase : Dict = [], []
UpperCAmelCase : Any = node
stacka.append(UpperCAmelCase )
while stacka: # to find the reversed order of post order, store it in stack2
UpperCAmelCase : Union[str, Any] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(UpperCAmelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=''',''' )
def a__ ( UpperCAmelCase : str = "" , UpperCAmelCase : int=50 , UpperCAmelCase : Union[str, Any]="*" ) -> str:
if not s:
return "\n" + width * char
UpperCAmelCase , UpperCAmelCase : int = divmod(width - len(UpperCAmelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
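# e.g. prompt("Hi", 10) -> '*** Hi ***': the title centred in a band of `char` exactly `width` wide.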
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
_lowerCamelCase : TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 99 | 0 |
'''simple docstring'''
import os
import time
import numpy as np
import onnxruntime as ort
lowerCamelCase : Tuple = "1"
lowerCamelCase : Optional[Any] = "0"
lowerCamelCase : Dict = "1"
lowerCamelCase : List[str] = ort.SessionOptions()
lowerCamelCase : str = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
lowerCamelCase : Optional[Any] = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
lowerCamelCase : Optional[int] = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
lowerCamelCase : Dict = ort.RunOptions()
lowerCamelCase : Dict = 1_2_8
lowerCamelCase : Optional[int] = 1
lowerCamelCase : List[str] = np.ones((batch, sequence), dtype=np.intaa)
lowerCamelCase : List[str] = np.ones((batch, sequence), dtype=np.intaa)
lowerCamelCase : List[str] = np.ones((batch, sequence), dtype=np.intaa)
print("Warm up phase...")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
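# The warm-up run above is deliberately excluded from timing: it pays one-time costs such as CUDA
# context creation and (for the TensorRT provider) engine building, which would skew the average.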
print("Start inference...")
lowerCamelCase : List[str] = time.time()
lowerCamelCase : Tuple = 2_0_0_0
lowerCamelCase : List[Any] = {}
for iter in range(max_iters):
lowerCamelCase : List[str] = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1_0_0_0 / max_iters))
| 47 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
A__ = logging.get_logger(__name__)
A__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A__ = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
A__ = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
A__ = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class a ( __lowerCamelCase ):
__lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Optional[Any] = RealmTokenizer
def __init__( self :Optional[Any] ,__lowercase :Dict=None ,__lowercase :Optional[int]=None ,__lowercase :Optional[Any]=True ,__lowercase :Optional[int]="[UNK]" ,__lowercase :List[str]="[SEP]" ,__lowercase :List[str]="[PAD]" ,__lowercase :int="[CLS]" ,__lowercase :str="[MASK]" ,__lowercase :Dict=True ,__lowercase :List[str]=None ,**__lowercase :Any ,):
super().__init__(
__lowercase ,tokenizer_file=__lowercase ,do_lower_case=__lowercase ,unk_token=__lowercase ,sep_token=__lowercase ,pad_token=__lowercase ,cls_token=__lowercase ,mask_token=__lowercase ,tokenize_chinese_chars=__lowercase ,strip_accents=__lowercase ,**__lowercase ,)
snake_case__ : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' ,__lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' ,__lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' ,__lowercase ) != tokenize_chinese_chars
):
snake_case__ : Optional[int] = getattr(__lowercase ,normalizer_state.pop('''type''' ) )
snake_case__ : List[Any] = do_lower_case
snake_case__ : Optional[Any] = strip_accents
snake_case__ : List[str] = tokenize_chinese_chars
snake_case__ : Dict = normalizer_class(**__lowercase )
snake_case__ : Tuple = do_lower_case
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Union[str, Any] ,**__lowercase :Any ):
snake_case__ : Dict = PaddingStrategy.MAX_LENGTH
snake_case__ : List[str] = text
snake_case__ : int = kwargs.pop('''text_pair''' ,__lowercase )
snake_case__ : Optional[int] = kwargs.pop('''return_tensors''' ,__lowercase )
snake_case__ : str = {
'''input_ids''': [],
'''attention_mask''': [],
'''token_type_ids''': [],
}
for idx, candidate_text in enumerate(__lowercase ):
if batch_text_pair is not None:
snake_case__ : Optional[int] = batch_text_pair[idx]
else:
snake_case__ : Tuple = None
snake_case__ : List[str] = super().__call__(__lowercase ,__lowercase ,return_tensors=__lowercase ,**__lowercase )
snake_case__ : Optional[Any] = encoded_candidates.get('''input_ids''' )
snake_case__ : Optional[Any] = encoded_candidates.get('''attention_mask''' )
snake_case__ : List[Any] = encoded_candidates.get('''token_type_ids''' )
if encoded_input_ids is not None:
output_data["input_ids"].append(__lowercase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(__lowercase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(__lowercase )
snake_case__ : Any = {key: item for key, item in output_data.items() if len(__lowercase ) != 0}
return BatchEncoding(__lowercase ,tensor_type=__lowercase )
def __lowerCamelCase ( self :List[Any] ,__lowercase :Tuple ,__lowercase :Tuple=None ):
snake_case__ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCamelCase ( self :List[str] ,__lowercase :List[int] ,__lowercase :Optional[List[int]] = None ):
snake_case__ : Tuple = [self.sep_token_id]
snake_case__ : Union[str, Any] = [self.cls_token_id]
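# BERT-style segment ids: 0 over `[CLS] A [SEP]`, and 1 over `B [SEP]` when a second sequence is given.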
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :str ,__lowercase :Optional[str] = None ):
snake_case__ : Tuple = self._tokenizer.model.save(__lowercase ,name=__lowercase )
return tuple(__lowercase )
| 230 | 0 |
'''simple docstring'''
A__ : Any = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A__ : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A__ : Tuple = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def a_ ( year : int ,month : int ,day : int ) -> str:
assert len(str(year ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 and 12"
assert 1 <= day <= 31, "day should be between 1 and 31"
# Doomsday algorithm:
__snake_case : str = year // 1_00
__snake_case : Tuple = (5 * (century % 4) + 2) % 7
__snake_case : Any = year % 1_00
__snake_case : Optional[int] = centurian % 12
__snake_case : List[Any] = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
__snake_case : Optional[int] = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0)
else DOOMSDAY_LEAP[month - 1]
)
__snake_case : Dict = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
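    # Illustrative checks (added; not in the original file):
    # 2023-01-01 fell on a Sunday and 2000-02-29 on a Tuesday.
    assert a_(2023, 1, 1) == 'Sunday'
    assert a_(2000, 2, 29) == 'Tuesday'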
| 0 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def a_ ( ) -> Tuple:
    parser = argparse.ArgumentParser()
    parser.add_argument('-f' )
    args = parser.parse_args()
    return args.f
def get_results ( output_dir ) -> List[Any]:
    results = {}
    path = os.path.join(output_dir ,'all_results.json' )
    if os.path.exists(path ):
        with open(path ,'r' ) as f:
            results = json.load(f )
    else:
        raise ValueError(f'''can\'t find {path}''' )
    return results
def is_cuda_and_apex_available ( ) -> Union[str, Any]:
    is_using_cuda = torch.cuda.is_available() and torch_device == 'cuda'
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class snake_case__ ( TestCasePlus ):
    @classmethod
    def setUpClass ( cls : Any ) -> List[str]:
        '''simple docstring'''
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir , 'default_config.yml' )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]
    @classmethod
    def tearDownClass ( cls : List[str] ) -> List[str]:
        '''simple docstring'''
        shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_run_glue_no_trainer ( self : Any ) -> Optional[Any]:
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'glue_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_run_clm_no_trainer ( self : List[Any] ) -> Union[str, Any]:
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result['perplexity'] , 100 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'clm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_run_mlm_no_trainer ( self : str ) -> List[str]:
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result['perplexity'] , 42 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'mlm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_run_ner_no_trainer ( self : Optional[int] ) -> Optional[int]:
        '''simple docstring'''
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
        self.assertLess(result['train_loss'] , 0.5 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'ner_no_trainer' ) ) )
@unittest.skip(reason='Fix me @muellerzr' )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_run_squad_no_trainer ( self : Any ) -> List[Any]:
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result['eval_f1'] , 28 )
        self.assertGreaterEqual(result['eval_exact'] , 28 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'qa_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_run_swag_no_trainer ( self : Dict ) -> List[Any]:
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['eval_accuracy'] , 0.8 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'swag_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_run_summarization_no_trainer ( self : Any ) -> Union[str, Any]:
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['eval_rouge1'] , 10 )
        self.assertGreaterEqual(result['eval_rouge2'] , 2 )
        self.assertGreaterEqual(result['eval_rougeL'] , 7 )
        self.assertGreaterEqual(result['eval_rougeLsum'] , 7 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'summarization_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_run_translation_no_trainer ( self : Union[str, Any] ) -> int:
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['eval_bleu'] , 30 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'epoch_0' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'translation_no_trainer' ) ) )
@slow
    def test_run_semantic_segmentation_no_trainer ( self : Optional[Any] ) -> Optional[Any]:
        '''simple docstring'''
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.10 )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def test_run_image_classification_no_trainer ( self : Tuple ) -> Any:
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # The base model scores a 25%
        self.assertGreaterEqual(result['eval_accuracy'] , 0.6 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'step_1' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , 'image_classification_no_trainer' ) ) )
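# Note (added): each test above shells out to its example script through
# `accelerate launch` (cls._launch_args) and then reads the metrics the
# script writes to <tmp_dir>/all_results.json via the get_results helper.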
| 0 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir('fixtures')
class ImageProcessorUtilTester ( unittest.TestCase ):
'''simple docstring'''
    def test_cached_files_are_used_when_internet_is_down ( self : Any ):
        """simple docstring"""
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
            _ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url ( self : Tuple ):
        """simple docstring"""
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
    def test_image_processor_from_pretrained_subfolder ( self : Any ):
        """simple docstring"""
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
        config = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
        self.assertIsNotNone(config )
@is_staging_test
class ImageProcessorPushToHubTester ( unittest.TestCase ):
'''simple docstring'''
@classmethod
    def setUpClass ( cls : int ):
        """simple docstring"""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass ( cls : Tuple ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
    def test_push_to_hub ( self : Tuple ):
        """simple docstring"""
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''test-image-processor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id='''test-image-processor''' , push_to_hub=True , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
    def test_push_to_hub_in_organization ( self : Optional[Any] ):
        """simple docstring"""
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=True , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
    def test_push_to_hub_dynamic_image_processor ( self : Any ):
        """simple docstring"""
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
        new_image_processor = AutoImageProcessor.from_pretrained(
            F"""{USER}/test-dynamic-image-processor""" , trust_remote_code=True )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
| 121 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Any = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''informer'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }
    def __init__( self , prediction_length: Optional[int] = None , context_length: Optional[int] = None , distribution_output: str = "student_t" , loss: str = "nll" , input_size: int = 1 , lags_sequence: List[int] = None , scaling: Optional[Union[str, bool]] = "mean" , num_dynamic_real_features: int = 0 , num_static_categorical_features: int = 0 , num_static_real_features: int = 0 , num_time_features: int = 0 , cardinality: Optional[List[int]] = None , embedding_dimension: Optional[List[int]] = None , d_model: int = 64 , encoder_ffn_dim: int = 32 , decoder_ffn_dim: int = 32 , encoder_attention_heads: int = 2 , decoder_attention_heads: int = 2 , encoder_layers: int = 2 , decoder_layers: int = 2 , is_encoder_decoder: bool = True , activation_function: str = "gelu" , dropout: float = 0.05 , encoder_layerdrop: float = 0.1 , decoder_layerdrop: float = 0.1 , attention_dropout: float = 0.1 , activation_dropout: float = 0.1 , num_parallel_samples: int = 100 , init_std: float = 0.02 , use_cache: bool = True , attention_type: str = "prob" , sampling_factor: int = 5 , distil: bool = True , **kwargs , ):
"""simple docstring"""
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def _number_of_features ( self ) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
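# Illustrative usage (added; values are arbitrary, not from the original file):
#   config = InformerConfig(prediction_length=24, context_length=48, num_time_features=3)
#   config.feature_size == config.input_size * len(config.lags_sequence) + config._number_of_features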
| 121 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
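# Note (added): MAPPING translates fairseq parameter names (keys) to Hugging Face
# module paths (values); a "*" is later replaced by the encoder layer index, and
# TOP_LEVEL_KEYS live on the model root instead of under the
# "wav2vec2_conformer." prefix.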
def set_recursively( hf_pointer , key , value , full_name , weight_type ) -> Tuple:
    '''simple docstring'''
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            F' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights( fairseq_model , hf_model , is_headless ) -> List[str]:
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    elif "running_mean" in name:
                        weight_type = 'running_mean'
                    elif "inv_freq" in name:
                        weight_type = 'inv_freq'
                    elif "running_var" in name:
                        weight_type = 'running_var'
                    elif "num_batches_tracked" in name:
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'Unused weights: {unused_weights}' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ) -> List[Any]:
    '''simple docstring'''
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ) -> int:
    '''simple docstring'''
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path , hidden_act='swish' )
    else:
        config = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = 'rotary'
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , 'vocab.json' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 0
            vocab_dict['<s>'] = 1
            with open(vocab_path , 'w' , encoding='utf-8' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaConformerForCTC(config )
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config )
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task='audio_pretraining' )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
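    # Example invocation (added for illustration; paths are placeholders and the
    # script file name is an assumption):
    #   python convert_wav2vec2_conformer.py --checkpoint_path /path/to/fairseq_ckpt.pt \
    #       --pytorch_dump_folder_path ./hf_wav2vec2_conformer --not_finetuned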
| 366 |
import math
def malus_law ( initial_intensity: float , angle: float ) -> float:
    '''simple docstring'''
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative' )
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
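    # Illustrative check (added; not in the original file): at 60 degrees the
    # transmitted intensity is I0 * cos^2(60 deg) = 0.25 * I0.
    assert abs(malus_law(100.0 , 60.0 ) - 25.0 ) < 1e-9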
| 169 | 0 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card ( model_card_dir , src_lang , tgt_lang , model_name ):
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
    pair = F'''{src_lang}-{tgt_lang}'''
UpperCamelCase :Optional[int] = F'''\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'''
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , '''README.md''' )
    print(F'''Generating {path}''' )
    with open(path , '''w''' , encoding='''utf-8''' ) as f:
        # `UpperCamelCase` still holds the README body assembled above
        f.write(UpperCamelCase )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
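# Note (added): running this script rewrites the README.md model card for each
# of the three allenai wmt16 en-de checkpoints under model_cards/allenai/.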
| 259 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def dataset ( ) -> str:
    n = 10
    features = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
    dataset = datasets.Dataset.from_dict(
        {
            'tokens': [['foo'] * 5] * n,
            'labels': [[1] * 5] * n,
            'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
            'id': list(range(n ) ),
        } , features=features , )
return dataset
@pytest.fixture(scope='session' )
def arrow_file ( tmp_path_factory , dataset ) -> int:
    filename = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
    dataset.map(cache_file_name=filename )
return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n    Text data.\n    Second line of data.'
@pytest.fixture(scope='session' )
def text_file ( tmp_path_factory ) -> List[str]:
    filename = tmp_path_factory.mktemp('data' ) / 'file.txt'
    data = FILE_CONTENT
    with open(filename , 'w' ) as f:
        f.write(data )
    return filename
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> List[str]:
import bza
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
with bza.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> Any:
import gzip
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
with gzip.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> int:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
with lza.frame.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] ) -> Any:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(__UpperCAmelCase , 'w' ) as archive:
archive.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : str ) -> str:
import tarfile
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__UpperCAmelCase , 'w' ) as f:
f.add(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> List[Any]:
import lzma
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
with lzma.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) -> str:
import zipfile
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> Any:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
with zstd.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.xml'
SCREAMING_SNAKE_CASE_ = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase )
return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
lowerCamelCase__ : Dict = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
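# Note (added): the DATA* constants above hold the canonical rows that the
# fixtures below serialize into each on-disk format (csv, json, jsonl,
# parquet, sqlite, text, archives, ...).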
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( ) -> Tuple:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = datasets.Dataset.from_dict(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(__UpperCAmelCase ) ) as con:
SCREAMING_SNAKE_CASE_ = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__UpperCAmelCase , 'w' , newline='' ) as f:
SCREAMING_SNAKE_CASE_ = csv.DictWriter(__UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__UpperCAmelCase , 'w' , newline='' ) as f:
SCREAMING_SNAKE_CASE_ = csv.DictWriter(__UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : Tuple ) -> str:
import bza
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__UpperCAmelCase , 'rb' ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : str ) -> Any:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
SCREAMING_SNAKE_CASE_ = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(__UpperCAmelCase , 'wb' ) as f:
SCREAMING_SNAKE_CASE_ = pq.ParquetWriter(__UpperCAmelCase , schema=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__UpperCAmelCase ) )] for k in DATA[0]} , schema=__UpperCAmelCase )
writer.write_table(__UpperCAmelCase )
writer.close()
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
SCREAMING_SNAKE_CASE_ = {'data': DATA}
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
SCREAMING_SNAKE_CASE_ = {'data': DATA_DICT_OF_LISTS}
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] ) -> Union[str, Any]:
import gzip
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__UpperCAmelCase , 'rb' ) as orig_file:
with gzip.open(__UpperCAmelCase , 'wb' ) as zipped_file:
zipped_file.writelines(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] ) -> List[str]:
import gzip
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__UpperCAmelCase , 'rb' ) as orig_file:
with gzip.open(__UpperCAmelCase , 'wb' ) as zipped_file:
zipped_file.writelines(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.join('nested' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ) -> Any:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__UpperCAmelCase , 'w' ) as f:
f.add(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.add(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__UpperCAmelCase , 'w' ) as f:
f.add(__UpperCAmelCase , arcname=os.path.join('nested' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = ['0', '1', '2', '3']
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> Any:
SCREAMING_SNAKE_CASE_ = ['0', '1', '2', '3']
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = ['0', '1', '2', '3']
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__UpperCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename('unsupported.ext' ) )
f.write(__UpperCAmelCase , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( ) -> List[Any]:
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( ) -> Tuple:
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) -> int:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def data_dir ( tmp_path_factory ) -> Dict:
    data_dir = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir | 225 | 0 |
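# Usage sketch (added, hedged): a test consuming one of the session fixtures above.
# The fixture names are descriptive reconstructions chosen in this cleanup, not
# guaranteed originals.
def test_data_dir_skips_hidden_entries(data_dir_with_hidden_files):
    import os
    visible = [n for n in os.listdir(data_dir_with_hidden_files) if not n.startswith('.')]
    assert visible == ['subdir']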
"""simple docstring"""
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
return round(float(moles / volume ) * nfactor )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
return round(float((moles * 0.08_21 * temperature) / (volume) ) )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
return round(float((moles * 0.08_21 * temperature) / (pressure) ) )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
return round(float((pressure * volume) / (0.08_21 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
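# Usage sketch (added): doctest-style checks of the converters above, using the
# descriptive names assumed in this cleanup; values are illustrative.
#     >>> moles_to_pressure(volume=10, moles=2, temperature=300)  # ~4.9 atm
#     5
#     >>> pressure_and_volume_to_temperature(pressure=5, moles=2, volume=10)
#     305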
 | 351 | """Convert ViT hybrid checkpoints from the timm library."""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Map timm parameter names to their Hugging Face ViT-hybrid counterparts."""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate query/key/value entries."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the timm classification head from the state dict, in place."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the value stored under `old` to `new`, in place."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Fetch the standard COCO cats image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the timm ViT-hybrid weights into the Hugging Face structure."""
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same", layer_type="bottleneck", depths=(3, 4, 9), out_features=["stage3"], embedding_dynamic_padding=True, )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
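# Usage sketch (added, hedged): the converter is driven from the command line; the
# script file name below is assumed, only the flags are defined in this snippet.
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base \
#       --push_to_hub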
| 30 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( __snake_case : list[float] ):
lowercase_ : str = 0.00
lowercase_ : str = 0
for resistor in resistors:
if resistor <= 0:
lowercase_ : str = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(__snake_case )
first_sum += 1 / float(__snake_case )
index += 1
return 1 / first_sum
def lowercase ( __snake_case : list[float] ):
lowercase_ : Optional[int] = 0.00
lowercase_ : Dict = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase_ : str = F'''Resistor at index {index} has a negative value!'''
raise ValueError(__snake_case )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
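# Usage sketch (added): two 4-ohm resistors combine to 2 ohms in parallel and
# 8 ohms in series.
#     >>> resistor_parallel([4.0, 4.0])
#     2.0
#     >>> resistor_series([4.0, 4.0])
#     8.0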
 | 33 | """Time Series Transformer model configuration."""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = 'time_series_transformer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }
    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output='student_t',
        loss='nll',
        input_size=1,
        lags_sequence=[1, 2, 3, 4, 5, 6, 7],
        scaling='mean',
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        num_time_features=0,
        cardinality=None,
        embedding_dimension=None,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        is_encoder_decoder=True,
        activation_function='gelu',
        d_model=64,
        dropout=0.1,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
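# Usage sketch (added): attribute names follow the signature restored above.
#     >>> config = TimeSeriesTransformerConfig(prediction_length=24)
#     >>> config.context_length  # defaults to the prediction length
#     24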
| 221 | 0 |
'''Banker's algorithm: a deadlock-avoidance check for resource allocation.'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self):
        """Total of each resource type currently allocated across all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]
    def __available_resources(self):
        """Resources still free: the claim vector minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())
    def __need(self):
        """Per-process outstanding need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]
    def __need_index_manager(self):
        return {self.__need().index(need): need for need in self.__need()}
    def main(self, **kwargs):
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f'Process {process_number + 1} is executing.')
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x) for x in available_resources]))
                    break
            if safe:
                print('The process is in a safe state.\n')
            else:
                print('System in unsafe state. Aborting...\n')
                break
    def __pretty_data(self):
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                f'P{self.__allocated_resources_table.index(item) + 1}'
                + ' '.join(f'{it:>8}' for it in item)
                + '\n')
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                f'P{self.__maximum_claim_table.index(item) + 1}'
                + ' '.join(f'{it:>8}' for it in item)
                + '\n')
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector))
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
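# Usage sketch (added, hedged): run the safety check on the test tables defined at
# the top of this module; `describe=True` triggers the pretty-printed report.
#     BankersAlgorithm(
#         test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#     ).main(describe=True)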
| 332 |
'''Fast and slow tests for the VQ-Diffusion pipeline.'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def num_embed(self):
        return 12
    @property
    def num_embeds_ada_norm(self):
        return 12
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
        return model
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)
    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            'attention_bias': True,
            'cross_attention_dim': 32,
            'attention_head_dim': height * width,
            'num_attention_heads': 1,
            'num_vector_embeds': self.num_embed,
            'num_embeds_ada_norm': self.num_embeds_ada_norm,
            'norm_num_groups': 32,
            'sample_size': width,
            'activation_fn': 'geglu-approximate',
        }
        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = 'cpu'
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'teddy bear playing in the pool'
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='np')
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='np', return_dict=False, num_inference_steps=2)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = 'cpu'
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'teddy bear playing in the pool'
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='np')
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='np', return_dict=False, num_inference_steps=2)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
        pipeline = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            'teddy bear playing in the pool', num_images_per_prompt=1, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 332 | 1 |
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)
    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1)
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
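# Shape sketch (added, hedged): illustrative sizes only; argument names follow the
# definitions restored above.
#     proj = UnCLIPTextProjModel(clip_extra_context_tokens=4, clip_embeddings_dim=768,
#                                time_embed_dim=1536, cross_attention_dim=768)
#     hidden, time_emb = proj(
#         image_embeddings=torch.randn(2, 768),
#         prompt_embeds=torch.randn(2, 768),
#         text_encoder_hidden_states=torch.randn(2, 77, 768),
#         do_classifier_free_guidance=False,
#     )
#     # hidden: (2, 4 + 77, 768); time_emb: (2, 1536)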
| 332 |
'''MVP model configuration.'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class MvpConfig(PretrainedConfig):
    model_type = 'mvp'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function='gelu',
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                'The config can simply be saved and uploaded again to be fixed.')
| 162 | 0 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a date given as mm-dd-yyyy or mm/dd/yyyy."""
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError('Must be 10 characters long')
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12')
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31')
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?')
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.')
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
    args = parser.parse_args()
    print(zeller(args.date_input))
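# Usage sketch (added): the function is also callable directly.
#     >>> zeller('01-31-2010')
#     'Your date 01-31-2010, is a Sunday!'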
| 173 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'albert-base-v1': 512,
    'albert-large-v1': 512,
    'albert-xlarge-v1': 512,
    'albert-xxlarge-v1': 512,
    'albert-base-v2': 512,
    'albert-large-v2': 512,
    'albert-xlarge-v2': 512,
    'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = '▁'
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token='[CLS]',
        eos_token='[SEP]',
        unk_token='<unk>',
        sep_token='[SEP]',
        pad_token='<pad>',
        cls_token='[CLS]',
        mask_token='[MASK]',
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace('\'\'', '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
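# Usage sketch (added, hedged): round-tripping text through the tokenizer; loading
# the pretrained vocabulary from the Hub requires network access.
#     tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
#     ids = tokenizer('Hello world').input_ids
#     text = tokenizer.decode(ids)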
| 173 | 1 |
'''Convert between common units of energy (table values are joules per unit).'''
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
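# Usage sketch (added): one kilowatt-hour expressed in megajoules.
#     >>> energy_conversion('kilowatthour', 'megajoule', 1.0)
#     3.6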
| 4 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding step, carrying the decoded `sample` tensor."""
    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = torch.nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1, )
        self.mid_block = None
        self.down_blocks = nn.ModuleList([])
        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None, )
            self.down_blocks.append(down_block)
        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None, )
        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
        self.gradient_checkpointing = False
    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)
                return custom_forward
            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False)
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False)
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)
        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",
    ):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1, )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        temb_channels = in_channels if norm_type == "spatial" else None
        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default" if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels, )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type, )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
        self.gradient_checkpointing = False
    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)
        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)
                return custom_forward
            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False)
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)
        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class VectorQuantizer(nn.Module):
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape
    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)
    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)
    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None
        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
        # preserve gradients
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()
        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def get_codebook_entry(self, indices, shape):
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again
        # get quantized latent vectors
        z_q = self.embedding(indices)
        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()
        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype)
    def sample(self, generator=None):
        # make sure sample lives on the same device and dtype as the parameters
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype)
        x = self.mean + self.std * sample
        return x
    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar, dim=[1, 2, 3], )
    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)
    def mode(self):
        return self.mean
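# Standalone sketch (added, hedged): the nearest-codebook lookup at the heart of
# VectorQuantizer.forward above, shown with illustrative shapes.
#     codebook = torch.randn(8, 3)                       # n_e = 8 entries of dim 3
#     z_flat = torch.randn(5, 3)                         # 5 flattened latents
#     idx = torch.argmin(torch.cdist(z_flat, codebook), dim=1)
#     z_q = codebook[idx]                                # quantized latents, (5, 3)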
| 88 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""",
"""funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""",
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""",
"""funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
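# Illustrative only (not from the original file): constructing the config above
# and reading the derived properties.
#
#   config = FunnelConfig(block_sizes=[2, 2])
#   assert config.num_blocks == 2                 # len(block_sizes)
#   assert config.num_hidden_layers == 4          # sum(block_sizes)
#   assert config.hidden_size == config.d_model   # resolved through attribute_map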
| 368 |
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main():
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)
if __name__ == "__main__":
main()
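# A quick round-trip sketch for the cipher above (illustrative only; key and
# message are arbitrary). Non-letters pass through unchanged, so decryption
# exactly inverts encryption:
#
#   ciphertext = encrypt_message("LION", "Attack at dawn")
#   assert decrypt_message("LION", ciphertext) == "Attack at dawn"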
| 192 | 0 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """
    A helper class to tee print's output into a file.
    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed nicely, wrapped for `max_width` chars.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # flip to `if 1:` to debug everything but the subprocess run itself (fakes the metrics)
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 1 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 103 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
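# Illustrative only (not from the original file): with defaults, the config
# nests a ResNet backbone and exposes the transformer sizes via properties.
#
#   config = DetaConfig()
#   assert config.hidden_size == 256           # alias for d_model via the property
#   assert config.num_attention_heads == 8     # encoder_attention_heads
#   serialized = config.to_dict()              # backbone_config becomes a nested dict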
| 63 |
def binary_and(a: int, b: int) -> str:
    """
    Return the bitwise AND of two non-negative integers as a binary string.

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 1 |
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 168 |
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Find the sum of the first `num_of_terms` terms of an arithmetic progression.

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    # formula for the sum of an arithmetic series: n/2 * (2a + (n - 1) * d)
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main():
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 168 | 1 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
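# Illustrative only: a typical call pattern for the helper above when retiring a
# keyword argument (`scale_factor` here is a hypothetical name, not a real API):
#
#   def resize(image, scale=None, **kwargs):
#       scale_factor = deprecate("scale_factor", "0.20.0", "Use `scale` instead.", take_from=kwargs)
#       if scale_factor is not None:
#           scale = scale_factor
#       ...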
| 361 |
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Sieve of Eratosthenes: return all primes up to and including `num`.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            # mark every multiple of p starting at p*p as composite
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
| 109 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 44 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass
shutil.move(
f"{directory}/__init__.py" , f"{model_dir}/__init__.py" , )
shutil.move(
f"{directory}/configuration_{lowercase_model_name}.py" , f"{model_dir}/configuration_{lowercase_model_name}.py" , )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_{lowercase_model_name}.py" , f"{model_dir}/modeling_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_tf_{lowercase_model_name}.py" , f"{model_dir}/modeling_tf_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_tf_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py" )
if output_flax:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_flax_{lowercase_model_name}.py" , f"{model_dir}/modeling_flax_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_flax_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/{lowercase_model_name}.md" , f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , )
shutil.move(
f"{directory}/tokenization_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/tokenization_fast_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
| 216 | 0 |
"""simple docstring"""
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
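# Illustrative only (assumes the tool call protocol chains encode -> forward ->
# decode, as in the transformers tools API; languages are plain-English keys of
# LANGUAGE_CODES above):
#
#   translator = TranslationTool()
#   translator("How are you?", src_lang="English", tgt_lang="French")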
| 350 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered ways to reach `target` by naive recursion."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, with memoization over the remaining target."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, built bottom-up without recursion."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]
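# The bottom-up variant above fills dp_array left to right with the recurrence
#   dp[i] = sum(dp[i - a] for a in array if i - a >= 0),  with dp[0] = 1,
# so for array=[1, 2, 5] and target=5 it computes dp = [1, 1, 2, 3, 5, 9] -> 9 orderings.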
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 317 | 0 |
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune the walk in place: skip `scripts` and hidden/underscore dirs
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
| 196 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 99 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 274 | '''simple docstring'''
Point3d = tuple[float, float, float]
Vector3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Pass two points to get the vector from them in the form (x, y, z)."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Get the cross product of the two vectors AB and AC, in the form (x, y, z)."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Check if a vector is (0, 0, 0) after rounding each component to `accuracy` digits."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear exactly when the cross product of AB and AC is zero."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
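# Illustrative sanity check (added; the sample points are not from the original
# file): points on the line x = y = z are collinear, a point off that line is not.
assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
assert not are_collinear((0, 0, 0), (1, 1, 1), (1, 2, 3))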
| 274 | 1 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given date via the Doomsday algorithm."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
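# Worked example (added): for 2012-01-01, century = 20 so the century anchor is
# (5 * (20 % 4) + 2) % 7 = 2; centurian = 12 gives dooms_day = (1 + 0 + 0 + 2) % 7 = 3;
# 2012 is a leap year, so day_anchor = DOOMSDAY_LEAP[0] = 4 and (3 + 1 - 4) % 7 = 0.
assert get_week_day(2012, 1, 1) == "Sunday"  # 2012-01-01 fell on a Sunday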
| 0 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase__ = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
UpperCAmelCase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
'''simple docstring'''
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
a = 7 if get_gpu_count() > 1 else 2
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''translation_no_trainer''' ) ) )
@slow
def __lowerCAmelCase ( self : List[str] ) ->int:
"""simple docstring"""
a = logging.StreamHandler(sys.stdout )
logger.addHandler(__UpperCAmelCase )
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''image_classification_no_trainer''' ) ) )
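# Shape shared by every test method above, in miniature (script path, metric
# name, and threshold are illustrative placeholders, not additions to the suite):
#   tmp_dir = self.get_auto_remove_tmp_dir()
#   testargs = f"{self.examples_dir}/pytorch/<task>/run_<task>_no_trainer.py --output_dir {tmp_dir} ...".split()
#   run_command(self._launch_args + testargs)   # i.e. `accelerate launch <script> <args>`
#   result = get_results(tmp_dir)               # reads <tmp_dir>/all_results.json
#   self.assertGreaterEqual(result["<metric>"], <threshold>)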
| 0 | 1 |
'''simple docstring'''
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for the SHA-1 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # left-rotate the 32-bit integer n by b bits
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        # pad the message to a multiple of 64 bytes, appending the original
        # bit-length as a big-endian 64-bit integer
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # expand the 16 32-bit words of a block into the 80 words used by the rounds
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest
    doctest.testmod()
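# Known-answer smoke test (added): SHA-1 of the empty byte string is the
# well-known digest below, which exercises padding and all 80 rounds at once.
assert SHA1Hash(b"").final_hash() == "da39a3ee5e6b4b0d3255bfef95601890afd80709"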
| 364 |
'''simple docstring'''
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for j in range(1, required_sum + 1):
        subset[0][j] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
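# Illustrative usage (added): 5 + 3 makes 8, but no subset of {5, 3, 7} makes 4.
assert is_sum_subset([5, 3, 7], 8)
assert not is_sum_subset([5, 3, 7], 4)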
| 92 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
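# Illustrative note (added): with the lazy module installed in sys.modules,
# `import transformers.models.vivit` stays cheap; the torch-backed classes are
# only imported the first time an attribute such as `VivitModel` is accessed
# (assuming torch and vision are available in the environment).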
| 100 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_lowerCAmelCase : int = get_logger(__name__)
_lowerCAmelCase : Any = r"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class _UpperCamelCase :
@add_start_docstrings(lowerCamelCase )
def __call__( self :Tuple , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray ) -> jnp.ndarray:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _UpperCamelCase :
@add_start_docstrings(lowerCamelCase )
def __call__( self :Union[str, Any] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray ) -> jnp.ndarray:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _UpperCamelCase ( lowerCAmelCase ):
@add_start_docstrings(lowerCamelCase )
def __call__( self :List[Any] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int , **lowerCamelCase :str ) -> jnp.ndarray:
for processor in self:
UpperCAmelCase__ = inspect.signature(processor.__call__ ).parameters
if len(lowerCamelCase ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
f'''{processor.__class__} are passed to the logits processor.''' )
UpperCAmelCase__ = processor(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase )
else:
UpperCAmelCase__ = processor(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :str , lowerCamelCase :float ) -> Tuple:
if not isinstance(lowerCamelCase , lowerCamelCase ) or not (temperature > 0):
raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
        self.temperature = temperature
def __call__( self :int , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
        scores = scores / self.temperature
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Optional[int] , lowerCamelCase :float , lowerCamelCase :float = -float("Inf" ) , lowerCamelCase :int = 1 ) -> Union[str, Any]:
if not isinstance(lowerCamelCase , lowerCamelCase ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(lowerCamelCase , lowerCamelCase ) or (min_tokens_to_keep < 1):
raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
UpperCAmelCase__ = top_p
UpperCAmelCase__ = filter_value
UpperCAmelCase__ = min_tokens_to_keep
def __call__( self :Tuple , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
UpperCAmelCase__ , UpperCAmelCase__ = lax.top_k(lowerCamelCase , scores.shape[-1] )
UpperCAmelCase__ = jnp.full_like(lowerCamelCase , self.filter_value )
UpperCAmelCase__ = jax.nn.softmax(lowerCamelCase , axis=-1 ).cumsum(axis=-1 )
UpperCAmelCase__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
UpperCAmelCase__ = jnp.roll(lowerCamelCase , 1 )
score_mask |= score_mask.at[:, 0].set(lowerCamelCase )
# min tokens to keep
UpperCAmelCase__ = score_mask.at[:, : self.min_tokens_to_keep].set(lowerCamelCase )
UpperCAmelCase__ = jnp.where(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = jax.lax.sort_key_val(lowerCamelCase , lowerCamelCase )[-1]
return next_scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Union[str, Any] , lowerCamelCase :int , lowerCamelCase :float = -float("Inf" ) , lowerCamelCase :int = 1 ) -> List[str]:
if not isinstance(lowerCamelCase , lowerCamelCase ) or top_k <= 0:
raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
UpperCAmelCase__ = max(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = filter_value
def __call__( self :Optional[int] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
UpperCAmelCase__ , UpperCAmelCase__ = scores.shape
UpperCAmelCase__ = jnp.full(batch_size * vocab_size , self.filter_value )
UpperCAmelCase__ = min(self.top_k , scores.shape[-1] ) # Safety check
UpperCAmelCase__ , UpperCAmelCase__ = lax.top_k(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = jnp.broadcast_to((jnp.arange(lowerCamelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
UpperCAmelCase__ = topk_scores.flatten()
UpperCAmelCase__ = topk_indices.flatten() + shift
UpperCAmelCase__ = next_scores_flat.at[topk_indices_flat].set(lowerCamelCase )
UpperCAmelCase__ = next_scores_flat.reshape(lowerCamelCase , lowerCamelCase )
return next_scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Any , lowerCamelCase :int ) -> List[Any]:
UpperCAmelCase__ = bos_token_id
def __call__( self :Optional[int] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
UpperCAmelCase__ = jnp.full(scores.shape , -float("inf" ) )
UpperCAmelCase__ = 1 - jnp.bool_(cur_len - 1 )
UpperCAmelCase__ = jnp.where(lowerCamelCase , new_scores.at[:, self.bos_token_id].set(0 ) , lowerCamelCase )
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Tuple , lowerCamelCase :int , lowerCamelCase :int ) -> List[Any]:
UpperCAmelCase__ = max_length
UpperCAmelCase__ = eos_token_id
def __call__( self :Union[str, Any] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
UpperCAmelCase__ = jnp.full(scores.shape , -float("inf" ) )
UpperCAmelCase__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
UpperCAmelCase__ = jnp.where(lowerCamelCase , new_scores.at[:, self.eos_token_id].set(0 ) , lowerCamelCase )
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Optional[Any] , lowerCamelCase :int , lowerCamelCase :int ) -> Tuple:
if not isinstance(lowerCamelCase , lowerCamelCase ) or min_length < 0:
raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(lowerCamelCase , lowerCamelCase ) or eos_token_id < 0:
raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
UpperCAmelCase__ = min_length
UpperCAmelCase__ = eos_token_id
def __call__( self :int , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
# create boolean flag to decide if min length penalty should be applied
UpperCAmelCase__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
UpperCAmelCase__ = jnp.where(lowerCamelCase , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , lowerCamelCase )
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :int , lowerCamelCase :List[str] , lowerCamelCase :str ) -> Any:
UpperCAmelCase__ = list(lowerCamelCase )
UpperCAmelCase__ = begin_index
def __call__( self :Union[str, Any] , lowerCamelCase :Union[str, Any] , lowerCamelCase :List[str] , lowerCamelCase :int ) -> List[Any]:
UpperCAmelCase__ = 1 - jnp.bool_(cur_len - self.begin_index )
UpperCAmelCase__ = jnp.where(lowerCamelCase , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , lowerCamelCase )
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :List[Any] , lowerCamelCase :list ) -> Tuple:
UpperCAmelCase__ = list(lowerCamelCase )
def __call__( self :Optional[Any] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
UpperCAmelCase__ = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :List[Any] , lowerCamelCase :List[str] ) -> Union[str, Any]:
UpperCAmelCase__ = dict(lowerCamelCase )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
UpperCAmelCase__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
UpperCAmelCase__ = force_token_array.at[index].set(lowerCamelCase )
UpperCAmelCase__ = jnp.intaa(lowerCamelCase )
def __call__( self :Optional[int] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
def _force_token(lowerCamelCase :str ):
UpperCAmelCase__ = scores.shape[0]
UpperCAmelCase__ = self.force_token_array[generation_idx]
UpperCAmelCase__ = jnp.ones_like(lowerCamelCase , dtype=scores.dtype ) * -float("inf" )
UpperCAmelCase__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
UpperCAmelCase__ = lax.dynamic_update_slice(lowerCamelCase , lowerCamelCase , (0, current_token) )
return new_scores
UpperCAmelCase__ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(lowerCamelCase ) , lambda: scores , ) , )
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Optional[Any] , lowerCamelCase :List[Any] , lowerCamelCase :Optional[int] , lowerCamelCase :Tuple ) -> Dict:
UpperCAmelCase__ = generate_config.eos_token_id
UpperCAmelCase__ = generate_config.no_timestamps_token_id
UpperCAmelCase__ = generate_config.no_timestamps_token_id + 1
UpperCAmelCase__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(lowerCamelCase , "max_initial_timestamp_index" ):
UpperCAmelCase__ = generate_config.max_initial_timestamp_index
else:
UpperCAmelCase__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
UpperCAmelCase__ = model_config.vocab_size
def __call__( self :List[str] , lowerCamelCase :str , lowerCamelCase :int , lowerCamelCase :Any ) -> Union[str, Any]:
# suppress <|notimestamps|> which is handled by without_timestamps
UpperCAmelCase__ = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(lowerCamelCase :int , lowerCamelCase :Union[str, Any] ):
UpperCAmelCase__ = jnp.where((cur_len - self.begin_index) >= 1 , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , lowerCamelCase , )
UpperCAmelCase__ = jnp.where((cur_len - self.begin_index) < 2 , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , lowerCamelCase , lowerCamelCase , )
return jnp.where(
lowerCamelCase , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , lowerCamelCase , )
UpperCAmelCase__ = jax.vmap(lowerCamelCase )(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = jnp.where(cur_len == self.begin_index , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , lowerCamelCase , )
UpperCAmelCase__ = self.timestamp_begin + self.max_initial_timestamp_index
UpperCAmelCase__ = jnp.where(
lowerCamelCase , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , lowerCamelCase , )
# if sum of probability over timestamps is above any other token, sample timestamp
UpperCAmelCase__ = jax.nn.log_softmax(lowerCamelCase , axis=-1 )
def handle_cumulative_probs(lowerCamelCase :Optional[int] , lowerCamelCase :Optional[Any] ):
UpperCAmelCase__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
UpperCAmelCase__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , lowerCamelCase , )
UpperCAmelCase__ = jax.vmap(lowerCamelCase )(lowerCamelCase , lowerCamelCase )
return scores
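# A minimal standalone sketch (added for illustration; not part of the original
# row) of the effect the temperature and top-k transforms above have on logits.
import jax
import jax.numpy as jnp
from jax import lax

logits = jnp.array([[2.0, 1.0, 0.5, -1.0]])
scaled = logits / 0.7                      # temperature < 1 sharpens the distribution
topk_vals, _ = lax.top_k(scaled, 2)        # keep only the 2 highest-scoring tokens
cutoff = topk_vals[:, -1, None]            # smallest score that survives
filtered = jnp.where(scaled < cutoff, -jnp.inf, scaled)
probs = jax.nn.softmax(filtered, axis=-1)  # masked tokens get probability 0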
| 169 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 230 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 230 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : int = StableDiffusionLatentUpscalePipeline
lowerCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
lowerCAmelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
lowerCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase : Dict = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase : List[Any] = frozenset([] )
lowerCAmelCase : List[str] = True
@property
def __lowercase ( self : List[str] ):
_a : int = 1
_a : str = 4
_a : str = (16, 16)
_a : Dict = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(_UpperCAmelCase )
return image
def __lowercase ( self : List[str] ):
torch.manual_seed(0 )
_a : Tuple = UNetaDConditionModel(
act_fn='gelu' ,attention_head_dim=8 ,norm_num_groups=_UpperCAmelCase ,block_out_channels=[32, 32, 64, 64] ,time_cond_proj_dim=160 ,conv_in_kernel=1 ,conv_out_kernel=1 ,cross_attention_dim=32 ,down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
) ,in_channels=8 ,mid_block_type=_UpperCAmelCase ,only_cross_attention=_UpperCAmelCase ,out_channels=5 ,resnet_time_scale_shift='scale_shift' ,time_embedding_type='fourier' ,timestep_post_act='gelu' ,up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') ,)
_a : Dict = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
_a : Optional[int] = EulerDiscreteScheduler(prediction_type='sample' )
_a : List[str] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='quick_gelu' ,projection_dim=512 ,)
_a : str = CLIPTextModel(_UpperCAmelCase )
_a : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_a : Dict = {
'unet': model.eval(),
'vae': vae.eval(),
'scheduler': scheduler,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowercase ( self : str ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : int=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
_a : List[str] = torch.manual_seed(_UpperCAmelCase )
else:
_a : List[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_a : int = {
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __lowercase ( self : int ):
_a : Tuple = 'cpu'
_a : Tuple = self.get_dummy_components()
_a : Any = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_a : Dict = self.get_dummy_inputs(_UpperCAmelCase )
_a : List[Any] = pipe(**_UpperCAmelCase ).images
_a : int = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 256, 256, 3) )
_a : Union[str, Any] = np.array(
[0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] )
_a : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase ,1E-3 )
def __lowercase ( self : Any ):
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def __lowercase ( self : Any ):
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def __lowercase ( self : str ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowercase ( self : str ):
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def __lowercase ( self : List[str] ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def __lowercase ( self : Dict ):
super().test_save_load_local(expected_max_difference=3E-3 )
def __lowercase ( self : Union[str, Any] ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __lowercase ( self : Tuple ):
_a : int = [
'DDIMScheduler',
'DDPMScheduler',
'PNDMScheduler',
'HeunDiscreteScheduler',
'EulerAncestralDiscreteScheduler',
'KDPM2DiscreteScheduler',
'KDPM2AncestralDiscreteScheduler',
'DPMSolverSDEScheduler',
]
_a : Optional[int] = self.get_dummy_components()
_a : Optional[int] = self.pipeline_class(**_UpperCAmelCase )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_a : Union[str, Any] = self.get_dummy_inputs(_UpperCAmelCase )
_a : Tuple = 2
_a : int = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
_a : int = getattr(_UpperCAmelCase ,scheduler_enum.name )
_a : int = scheduler_cls.from_config(pipe.scheduler.config )
_a : List[str] = pipe(**_UpperCAmelCase )[0]
outputs.append(_UpperCAmelCase )
assert check_same_shape(_UpperCAmelCase )
@require_torch_gpu
@slow
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : Dict ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : List[str] ):
_a : Dict = torch.manual_seed(33 )
_a : str = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ,torch_dtype=torch.floataa )
pipe.to('cuda' )
_a : List[str] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' ,torch_dtype=torch.floataa )
upscaler.to('cuda' )
_a : Optional[Any] = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
_a : Any = pipe(_UpperCAmelCase ,generator=_UpperCAmelCase ,output_type='latent' ).images
_a : Optional[int] = upscaler(
prompt=_UpperCAmelCase ,image=_UpperCAmelCase ,num_inference_steps=20 ,guidance_scale=0 ,generator=_UpperCAmelCase ,output_type='np' ,).images[0]
_a : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
assert np.abs((expected_image - image).mean() ) < 5E-2
def __lowercase ( self : Optional[int] ):
_a : Any = torch.manual_seed(33 )
_a : Optional[int] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' ,torch_dtype=torch.floataa )
upscaler.to('cuda' )
_a : Tuple = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
_a : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
_a : Optional[Any] = upscaler(
prompt=_UpperCAmelCase ,image=_UpperCAmelCase ,num_inference_steps=20 ,guidance_scale=0 ,generator=_UpperCAmelCase ,output_type='np' ,).images[0]
_a : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
assert np.abs((expected_image - image).max() ) < 5E-2
| 89 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_base_extractors(compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, text_file, xz_file, zip_file, zstd_file, tmp_path):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
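# Note: `require_lz4`, `require_py7zr` and `require_zstandard` are pytest skip markers,
# which is why the skip message above is read back through `.kwargs["reason"]`.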
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_extractor(compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, text_file, xz_file, zip_file, zstd_file, tmp_path):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile
    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile
    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def test_tar_extract_insecure_files(insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
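# Rationale (sketch): the extractor only inspects the file's leading magic number, while
# `zipfile.is_zipfile` also scans for an end-of-central-directory record anywhere in the
# file — which the "PK\x05\x06" bytes embedded in this PNG payload satisfy.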
| 30 | 0 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."})
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )
    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
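# Illustrative usage (values are hypothetical):
#   training_args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True, generation_num_beams=4)
#   training_args.to_dict()  # any GenerationConfig value is serialized to a nested plain dict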
| 362 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self):
        return self.encoder_attention_heads
    @property
    def hidden_size(self):
        return self.d_model
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        return 1e-5
    @property
    def default_onnx_opset(self) -> int:
        return 12
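# Minimal usage sketch (defaults only):
#   config = ConditionalDetrConfig()
#   serialized = config.to_dict()  # a nested `backbone_config`, if set, is itself a plain dict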
| 115 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self):
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]
    def __available_resources(self):
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())
    def __need(self):
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]
    def __need_index_manager(self):
        return {self.__need().index(need): need for need in self.__need()}
    def main(self, **kwargs):
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f'Process {process_number + 1} is executing.')
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x) for x in available_resources]))
                    break
            if safe:
                print('The process is in a safe state.\n')
            else:
                print('System in unsafe state. Aborting...\n')
                break
    def __pretty_data(self):
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                f'P{self.__allocated_resources_table.index(item) + 1}'
                + ' '.join(f'{it:>8}' for it in item)
                + '\n')
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                f'P{self.__maximum_claim_table.index(item) + 1}'
                + ' '.join(f'{it:>8}' for it in item)
                + '\n')
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector))
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
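# Illustrative run with the module-level tables (the keyword name is arbitrary — any truthy
# keyword argument triggers the pretty-printed tables before the safety check):
#   BankersAlgorithm(test_claim_vector, test_allocated_res_table, test_maximum_claim_table).main(describe=True)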
if __name__ == "__main__":
import doctest
doctest.testmod()
| 332 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)
    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
    def test_memory_explicit(self):
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1
        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])
    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass
        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass
        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()
        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])
    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")
        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])
    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
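# The [128, 64, 32, 16, 8] sequences asserted above come from `find_executable_batch_size`
# halving the batch size after every CUDA-out-of-memory-looking RuntimeError until the
# decorated function succeeds (or the batch size reaches zero, which raises instead).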
| 332 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
UpperCAmelCase = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"), os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"), )
    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", REFERENCE_CODE + "\n", )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", re.sub("Bert", "TestModel", REFERENCE_CODE), )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}", f"{long_class_name}LMPredictionHead", re.sub("Bert", long_class_name, REFERENCE_CODE), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", REFERENCE_CODE, overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE), )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"])
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"])
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        link_unchanged_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"])
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample) | 267 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) | 267 | 1 |
"""simple docstring"""
import os
import pytest
from attr import dataclass
_UpperCAmelCase = """us-east-1""" # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]
    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transfromers-test"
    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"
    @property
    def image_uri(self):
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 173 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
    required_optional_params = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_0(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
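    # Note on the dummy mask: it is all ones except the zeroed top-left 32x32 quadrant, so the
    # two regions receive opposite treatment (kept vs. inpainted) under the Kandinsky mask
    # convention — which, unlike Stable Diffusion's, is documented in the pipeline itself.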
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        output = pipeline(
            prompt, image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 173 | 1 |
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location='cpu')
    args = m2m_100['args'] or m2m_100['cfg']['model']
    state_dict = m2m_100['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = M2M100Config(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', )
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
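# Note: `make_linear_from_emb` re-creates fairseq's tied output projection — the LM head
# shares its weight data with the shared input embedding matrix rather than learning new weights.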
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 363 | """simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace('img_encoder.pos_embed', 'vision_model.embeddings.position_embeddings')
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace('img_encoder.patch_embed.proj', 'vision_model.embeddings.patch_embeddings.projection')
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace('img_encoder.patch_embed.norm', 'vision_model.embeddings.layernorm')
    if "img_encoder.layers" in name:
        name = name.replace('img_encoder.layers', 'vision_model.encoder.stages')
    if "blocks" in name and "res" not in name:
        name = name.replace('blocks', 'layers')
    if "attn" in name and "pre_assign" not in name:
        name = name.replace('attn', 'self_attn')
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace('proj', 'out_proj')
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace('pre_assign_attn.attn.proj', 'pre_assign_attn.attn.out_proj')
    if "norm1" in name:
        name = name.replace('norm1', 'layer_norm1')
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace('norm2', 'layer_norm2')
    if "img_encoder.norm" in name:
        name = name.replace('img_encoder.norm', 'vision_model.layernorm')
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace('text_encoder.token_embedding', 'text_model.embeddings.token_embedding')
    if "text_encoder.positional_embedding" in name:
        name = name.replace('text_encoder.positional_embedding', 'text_model.embeddings.position_embedding.weight')
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace('text_encoder.transformer.resblocks.', 'text_model.encoder.layers.')
    if "ln_1" in name:
        name = name.replace('ln_1', 'layer_norm1')
    if "ln_2" in name:
        name = name.replace('ln_2', 'layer_norm2')
    if "c_fc" in name:
        name = name.replace('c_fc', 'fc1')
    if "c_proj" in name:
        name = name.replace('c_proj', 'fc2')
    if "text_encoder" in name:
        name = name.replace('text_encoder', 'text_model')
    if "ln_final" in name:
        name = name.replace('ln_final', 'final_layer_norm')
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace('img_projector.linear_hidden.', 'visual_projection.')
    if "img_projector.linear_out." in name:
        name = name.replace('img_projector.linear_out.', 'visual_projection.3.')
    if "text_projector.linear_hidden" in name:
        name = name.replace('text_projector.linear_hidden', 'text_projection')
    if "text_projector.linear_out" in name:
        name = name.replace('text_projector.linear_out', 'text_projection.3')
    return name
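# Illustrative walk through the rules above (hypothetical key):
#   rename_key("img_encoder.layers.0.blocks.0.attn.qkv.weight")
#   -> "vision_model.encoder.stages.0.layers.0.self_attn.qkv.weight"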
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.')
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.')
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
    image = prepare_img()
    inputs = processor(text=['a photo of a cat', 'a photo of a dog'], images=image, padding=True, return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs)
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3_523, 6.3_629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1_873, 8.6_230]])
    else:
        raise ValueError(f'Model name {model_name} not supported.')
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)
    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print('Successfully saved processor and model to', pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing to the hub...')
        processor.push_to_hub(model_name, organization='nielsr')
        model.push_to_hub(model_name, organization='nielsr')
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default='groupvit-gcc-yfcc',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 312 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, """config.json""")) and os.path.isfile(
            os.path.join(dirpath, """config.json""")):
            os.remove(os.path.join(dirpath, """config.json"""))
        if os.path.exists(os.path.join(dirpath, """pytorch_model.bin""")) and os.path.isfile(
            os.path.join(dirpath, """pytorch_model.bin""")):
            os.remove(os.path.join(dirpath, """pytorch_model.bin"""))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
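# Shannon entropy -sum(p * log p) along the last dimension, with 0 * log 0 treated as 0 via
# the explicit masking above; when `unlogit=True`, `p` is first raised to `exponent` (squared)
# before the computation.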
def print_2d_tensor(tensor):
    logger.info("""lv, h >\t""" + """\t""".join(F"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(F"layer {row + 1}:\t" + """\t""".join(F"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(F"layer {row + 1}:\t" + """\t""".join(F"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False):
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="""Iteration""", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info("""Attention entropies""")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("""Head importance scores""")
        print_2d_tensor(head_importance)
    logger.info("""Head ranked by importance scores""")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device)
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
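# Head importance here is the accumulated absolute gradient of the loss w.r.t. each head-mask
# entry — a first-order proxy for how much the loss would change if that head were removed —
# optionally normalized per layer and/or globally depending on the CLI flags.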
def mask_heads(args, model, eval_dataloader):
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("""Pruning: original score: %f, threshold: %f""", original_score, original_score * args.masking_threshold)
    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("""Inf""")
        current_heads_to_mask = head_importance.view(-1).sort()[1]
        if len(current_heads_to_mask) <= num_to_mask:
            print("""BREAK BY num_to_mask""")
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("""Heads to mask: %s""", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask)
        current_score = 1 / loss
        logger.info(
            """Masking: current score: %f, remaining heads %d (%.1f percents)""", current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, )
    logger.info("""Final head mask""")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, """head_mask.npy"""), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the head weights) based on the head importance scores
    as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
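
# For illustration of the `heads_to_prune` format built above: with a hypothetical
# 2-layer / 4-head mask
#     head_mask = [[1., 0., 1., 1.],
#                  [0., 0., 1., 1.]]
# the dict comprehension yields {0: [1], 1: [0, 1]} -- per-layer lists of the masked
# head indices, which is exactly what model.prune_heads expects.
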
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| 321 |
import string
def decrypt(message: str) -> None:
    """
    Brute-force every possible Caesar key and print each candidate plaintext.
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
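
# Example: brute-forcing the ciphertext "WKLV LV D WHVW" prints one candidate per key;
# the line for key #3 reads "THIS IS A TEST", since the message was enciphered with a
# Caesar shift of 3.
# decrypt("WKLV LV D WHVW")
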
| 192 | 0 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise the PyTorch model from its JSON config
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
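
# Example invocation (file paths are illustrative, not shipped with this script):
# python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./rembert/model.ckpt \
#     --rembert_config_file ./rembert/config.json \
#     --pytorch_dump_path ./rembert/pytorch_model.bin
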
| 360 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 7 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 63 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 63 | 1 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
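
# Usage illustration (the requirement strings below are hypothetical examples):
# require_version("numpy>=1.17,<2.0")   # range with two bounds, split on ","
# require_version("tokenizers==0.9.4")  # exact pin
# require_version("python>=3.8")        # special-cased check against sys.version_info
# require_version_core("packaging")     # bare package check with the core install hint
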
| 323 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
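
# Typical entry point for these pipelines (repo id as published by DeepFloyd and used
# elsewhere in this codebase; shown here as an illustrative comment only):
# pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
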
| 323 | 1 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
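
# Note on the expected (1, 12, 768) shape: the 10-word sentence plus the <s> and </s>
# special tokens added by tokenizer.encode yields 12 positions (this assumes, as holds
# for this particular sentence, that each word maps to a single sentencepiece token).
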
| 10 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """
    Constructs an ESM tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False):
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
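
# Illustration of the special-token layout produced above (token ids are hypothetical):
# build_inputs_with_special_tokens([5, 6, 7])        -> [cls_id, 5, 6, 7, eos_id]
# build_inputs_with_special_tokens([5, 6], [8, 9])   -> [cls_id, 5, 6, eos_id, 8, 9, eos_id]
# i.e. ESM reuses <eos> where other tokenizers would place a dedicated <sep> token.
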
| 109 | 0 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
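
# For illustration, create_dummy_object("UNet2DModel", '["torch"]') renders DUMMY_CLASS roughly as:
#
#     class UNet2DModel(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])
#
# so the dummy import succeeds, and a helpful error is raised only when the object is used.
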
| 109 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """
    Return the number of different square laminae that can be formed using up to
    `limit` tiles (Project Euler 173).
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
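
# Sanity check from the Project Euler 173 statement: using up to one hundred tiles,
# exactly forty-one different square laminae can be formed, i.e. solution(100) == 41.
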
| 109 | 1 |
def apply_table(inp, table):
    """
    >>> apply_table("0123456789", list(range(10)))
    '9012345678'
    """
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """
    >>> left_shift("0123456789")
    '1234567890'
    """
    return data[1:] + data[0]


def xor(a, b):
    """
    >>> xor("01010101", "00001111")
    '01011010'
    """
    res = ""
    for i in range(len(b)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
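
# Note on the decryption block: it is the encryption schedule run with the subkeys in
# reverse order (key2 in the first round, key1 in the second), so for any valid 10-bit
# key and 8-bit binary message the final line should echo the original message, e.g.
# key 1010000010 with message 11010111 (values are illustrative).
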
| 285 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
a__ = logging.get_logger(__name__)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self : Any , *lowerCAmelCase : Any , **lowerCAmelCase : List[str]) -> None:
"""simple docstring"""
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , lowerCAmelCase , )
super().__init__(*lowerCAmelCase , **lowerCAmelCase)
| 317 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if

        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)

        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 363 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
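
# Example: factorial(5) == 120. Because of @lru_cache, a follow-up call such as
# factorial(6) reuses the cached factorial(5) and performs a single extra multiplication
# instead of recursing all the way down again.
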
| 128 | 0 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
A__ = f.read()
A__ = REPLACE_PATTERNS["""init"""][0].search(__a ).groups()[0]
return packaging.version.parse(__a )
def __lowerCamelCase ( __a :Any=False ) -> Union[str, Any]:
"""simple docstring"""
A__ = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
A__ = default_version.base_version
elif patch:
A__ = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
A__ = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
A__ = input(F'Which version are you releasing? [{default_version}]' )
if len(__a ) == 0:
A__ = default_version
print(F'Updating version to {version}.' )
global_version_update(__a , patch=__a )
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ = get_version()
A__ = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
A__ = current_version.base_version
# Check with the user we got that right.
A__ = input(F'Which version are we developing now? [{dev_version}]' )
if len(__a ) == 0:
A__ = dev_version
print(F'Updating version to {version}.' )
global_version_update(__a )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
A : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
A : Union[str, Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
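# A self-contained look at what one REPLACE_PATTERNS entry does; the sample
# module text below is made up for illustration.
import re

import packaging.version

pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
sample = 'from .utils import logging\n__version__ = "0.18.0.dev0"\n'

current = pattern.search(sample).groups()[0]
print(current)                                         # 0.18.0.dev0
print(packaging.version.parse(current).is_devrelease)  # True
print(packaging.version.parse(current).base_version)   # 0.18.0

# The replacement string keeps its trailing newline because \s*$ can consume it.
print(pattern.sub('__version__ = "0.18.0"\n', sample))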
| 274 |
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Heuristic for the A* algorithm: Manhattan or Euclidean distance to the goal."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of successors (both in the grid and free spaces)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Retrace the path from parent to parent until the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
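# A minimal usage sketch on the module-level grid. Positions are (y, x) tuples,
# and the heuristic is picked by the module constant HEURISTIC, so switch it
# before constructing any nodes.
start, goal = (0, 0), (len(grid) - 1, len(grid[0]) - 1)
print(AStar(start, goal).search())
# e.g. [(0, 0), (1, 0), ..., (6, 6)] -- one shortest path through the free cells
print(BidirectionalAStar(start, goal).search())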
| 274 | 1 |
"""
The Koch snowflake is a fractal curve: starting from an equilateral triangle,
every iteration replaces the middle third of each segment with the two sides
of an outward-pointing equilateral bump.
"""
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the iteration step `steps` times to the list of vertices."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each segment by four: keep the endpoints, add the bump in the middle third."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Standard counterclockwise rotation of a 2-D vector by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the snowflake from the list of vertices."""
    # avoid stretched display of graph
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
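# The rotate helper is a plain 2-D rotation matrix. A quick numeric check, plus
# the segment-count growth that makes the iteration count matter: every step
# turns each segment into four.
import numpy

print(numpy.round(rotate(numpy.array([1, 0]), 60), 4))  # [0.5    0.866] == (cos 60, sin 60)
print(3 * 4**5)  # 3072 segments in the snowflake drawn above (3 sides, 5 iterations)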
| 287 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300):
    "HTML code for a progress bar `value`/`total` with `label` on the right, `prefix` on the left."
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : str = 5
_snake_case : Optional[int] = 0.2
def __init__( self : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional["NotebookTrainingTracker"] = None , lowerCAmelCase__ : int = 300 , ) -> int:
'''simple docstring'''
_UpperCamelCase = total
_UpperCamelCase = '''''' if prefix is None else prefix
_UpperCamelCase = leave
_UpperCamelCase = parent
_UpperCamelCase = width
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
def snake_case__ ( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : str = None ) -> Dict:
'''simple docstring'''
_UpperCamelCase = value
if comment is not None:
_UpperCamelCase = comment
if self.last_value is None:
_UpperCamelCase = _UpperCamelCase = time.time()
_UpperCamelCase = _UpperCamelCase = value
_UpperCamelCase = _UpperCamelCase = None
_UpperCamelCase = self.warmup
_UpperCamelCase = 1
self.update_bar(lowerCAmelCase__ )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
_UpperCamelCase = time.time()
_UpperCamelCase = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
_UpperCamelCase = self.elapsed_time / (value - self.start_value)
else:
_UpperCamelCase = None
if value >= self.total:
_UpperCamelCase = self.total
_UpperCamelCase = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
_UpperCamelCase = self.average_time_per_item * (self.total - value)
self.update_bar(lowerCAmelCase__ )
_UpperCamelCase = value
_UpperCamelCase = current_time
if self.average_time_per_item is None:
_UpperCamelCase = 1
else:
_UpperCamelCase = max(int(self.update_every / self.average_time_per_item ) , 1 )
def snake_case__ ( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple=None ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = ''' ''' * (len(str(self.total ) ) - len(str(lowerCAmelCase__ ) )) + str(lowerCAmelCase__ )
if self.elapsed_time is None:
_UpperCamelCase = f"""[{spaced_value}/{self.total} : < :"""
elif self.predicted_remaining is None:
_UpperCamelCase = f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"""
else:
_UpperCamelCase = (
f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"""
f""" {format_time(self.predicted_remaining )}"""
)
self.label += f""", {1/self.average_time_per_item:.2f} it/s"""
self.label += "]" if self.comment is None or len(self.comment ) == 0 else f""", {self.comment}]"""
self.display()
def snake_case__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
_UpperCamelCase = disp.display(disp.HTML(self.html_code ) , display_id=lowerCAmelCase__ )
else:
self.output.update(disp.HTML(self.html_code ) )
def snake_case__ ( self : Tuple ) -> Any:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any]=None ) -> Dict:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
_UpperCamelCase = None if column_names is None else [column_names]
_UpperCamelCase = None
def snake_case__ ( self : List[Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
_UpperCamelCase = disp.display(disp.HTML(self.html_code ) , display_id=lowerCAmelCase__ )
else:
self.output.update(disp.HTML(self.html_code ) )
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : int ) -> Union[str, Any]:
'''simple docstring'''
if self.inner_table is None:
_UpperCamelCase = [list(values.keys() ), list(values.values() )]
else:
_UpperCamelCase = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(lowerCAmelCase__ )
_UpperCamelCase = columns
self.inner_table.append([values[c] for c in columns] )
def snake_case__ ( self : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : List[str]=300 ) -> int:
'''simple docstring'''
_UpperCamelCase = NotebookProgressBar(lowerCAmelCase__ , prefix=lowerCAmelCase__ , parent=self , width=lowerCAmelCase__ )
return self.child_bar
def snake_case__ ( self : Any ) -> str:
'''simple docstring'''
_UpperCamelCase = None
self.display()
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self : str ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = False
def snake_case__ ( self : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : str , **lowerCAmelCase__ : Any ) -> Dict:
'''simple docstring'''
_UpperCamelCase = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
_UpperCamelCase = NotebookTrainingTracker(state.max_steps , lowerCAmelCase__ )
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , **lowerCAmelCase__ : Dict ) -> Dict:
'''simple docstring'''
_UpperCamelCase = int(state.epoch ) if int(state.epoch ) == state.epoch else f"""{state.epoch:.2f}"""
self.training_tracker.update(
state.global_step + 1 , comment=f"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , )
_UpperCamelCase = False
def snake_case__ ( self : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Dict ) -> Dict:
'''simple docstring'''
if not has_length(lowerCAmelCase__ ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
_UpperCamelCase = self.training_tracker.add_child(len(lowerCAmelCase__ ) )
else:
_UpperCamelCase = NotebookProgressBar(len(lowerCAmelCase__ ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , **lowerCAmelCase__ : Any ) -> Optional[int]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
_UpperCamelCase = None
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int]=None , **lowerCAmelCase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
_UpperCamelCase = {'''Training Loss''': logs['''loss''']}
# First column is necessarily Step since we're not in epoch eval strategy
_UpperCamelCase = state.global_step
self.training_tracker.write_line(lowerCAmelCase__ )
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : List[str] ) -> List[str]:
'''simple docstring'''
if self.training_tracker is not None:
_UpperCamelCase = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
_UpperCamelCase = log['''loss''']
break
if self.first_column == "Epoch":
_UpperCamelCase = int(state.epoch )
else:
_UpperCamelCase = state.global_step
_UpperCamelCase = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
_UpperCamelCase = re.sub(r'''\_loss$''' , '''''' , lowerCAmelCase__ )
_UpperCamelCase = metrics.pop('''total_flos''' , lowerCAmelCase__ )
_UpperCamelCase = metrics.pop('''epoch''' , lowerCAmelCase__ )
_UpperCamelCase = metrics.pop(f"""{metric_key_prefix}_runtime""" , lowerCAmelCase__ )
_UpperCamelCase = metrics.pop(f"""{metric_key_prefix}_samples_per_second""" , lowerCAmelCase__ )
_UpperCamelCase = metrics.pop(f"""{metric_key_prefix}_steps_per_second""" , lowerCAmelCase__ )
_UpperCamelCase = metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""" , lowerCAmelCase__ )
for k, v in metrics.items():
if k == f"""{metric_key_prefix}_loss""":
_UpperCamelCase = v
else:
_UpperCamelCase = k.split('''_''' )
_UpperCamelCase = ''' '''.join([part.capitalize() for part in splits[1:]] )
_UpperCamelCase = v
self.training_tracker.write_line(lowerCAmelCase__ )
self.training_tracker.remove_child()
_UpperCamelCase = None
# Evaluation takes a long time so we should force the next update.
_UpperCamelCase = True
def snake_case__ ( self : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , **lowerCAmelCase__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=f"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=lowerCAmelCase__ )
_UpperCamelCase = None
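# The three helpers at the top of this file are usable on their own; the classes
# only glue them to IPython.display. For instance:
print(format_time(3725))  # 1:02:05
print(html_progress_bar(3, 10, "train", "3/10"))  # wrap in disp.HTML(...) to render
print(text_to_html_table([["Step", "Loss"], [10, 0.5]]))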
| 287 | 1 |
"""
Banker's algorithm: decide whether the system is in a safe state by simulating
processes whose remaining resource needs can still be satisfied.
"""
from __future__ import annotations

import time

import numpy as np

test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated amount of each resource over all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: claim vector minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Remaining need of each process: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process index to its need vector, so the original index survives removals."""
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        """Simulate the algorithm: repeatedly run any process whose need fits in the available pool."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align the display of the algorithm's input tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
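# Running the class on the module's test tables; any truthy keyword argument
# switches on the table pretty-printer before the simulation starts.
BankersAlgorithm(
    test_claim_vector, test_allocated_res_table, test_maximum_claim_table
).main(describe=True)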
| 56 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 92 | 0 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple ):
return EnvironmentCommand()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] ):
return EnvironmentCommand(args.accelerate_config_file )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
@staticmethod
def _a ( A_ ) -> Dict:
__UpperCamelCase =parser.add_parser('env' )
download_parser.set_defaults(func=A_ )
download_parser.add_argument(
'--accelerate-config_file' , default=A_ , help='The accelerate config file to use for the default values in the launching script.' , )
download_parser.set_defaults(func=A_ )
def __init__( self , A_ , *A_ ) -> None:
__UpperCamelCase =accelerate_config_file
def _a ( self ) -> Optional[Any]:
__UpperCamelCase ='not installed'
if is_safetensors_available():
import safetensors
__UpperCamelCase =safetensors.__version__
elif importlib.util.find_spec('safetensors' ) is not None:
import safetensors
__UpperCamelCase =f'{safetensors.__version__} but is ignored because of PyTorch version too old.'
__UpperCamelCase ='not installed'
__UpperCamelCase =__UpperCamelCase ='not found'
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
__UpperCamelCase =accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(A_ ):
__UpperCamelCase =load_config_from_file(self._accelerate_config_file ).to_dict()
__UpperCamelCase =(
'\n'.join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
if isinstance(A_ , A_ )
else f'\t{accelerate_config}'
)
__UpperCamelCase ='not installed'
__UpperCamelCase ='NA'
if is_torch_available():
import torch
__UpperCamelCase =torch.__version__
__UpperCamelCase =torch.cuda.is_available()
__UpperCamelCase ='not installed'
__UpperCamelCase ='NA'
if is_tf_available():
import tensorflow as tf
__UpperCamelCase =tf.__version__
try:
# deprecated in v2.1
__UpperCamelCase =tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
__UpperCamelCase =bool(tf.config.list_physical_devices('GPU' ) )
__UpperCamelCase ='not installed'
__UpperCamelCase ='not installed'
__UpperCamelCase ='not installed'
__UpperCamelCase ='NA'
if is_flax_available():
import flax
import jax
import jaxlib
__UpperCamelCase =flax.__version__
__UpperCamelCase =jax.__version__
__UpperCamelCase =jaxlib.__version__
__UpperCamelCase =jax.lib.xla_bridge.get_backend().platform
__UpperCamelCase ={
'`transformers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Huggingface_hub version': huggingface_hub.__version__,
'Safetensors version': f'{safetensors_version}',
'Accelerate version': f'{accelerate_version}',
'Accelerate config': f'{accelerate_config_str}',
'PyTorch version (GPU?)': f'{pt_version} ({pt_cuda_available})',
'Tensorflow version (GPU?)': f'{tf_version} ({tf_cuda_available})',
'Flax version (CPU?/GPU?/TPU?)': f'{flax_version} ({jax_backend})',
'Jax version': f'{jax_version}',
'JaxLib version': f'{jaxlib_version}',
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(A_ ) )
return info
@staticmethod
def _a ( A_ ) -> Union[str, Any]:
return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 117 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """
    Get the writer_batch_size that defines the maximum row group size in the parquet files.
    The default in `datasets` is higher, but we lower it for vision/audio features so that
    reading one row does not require decoding an oversized row group.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ = None , A_ = None , A_ = None , A_ = False , A_ = False , A_ = None , **A_ , ) -> Dict:
super().__init__(
A_ , split=A_ , features=A_ , cache_dir=A_ , keep_in_memory=A_ , streaming=A_ , num_proc=A_ , **A_ , )
__UpperCamelCase =path_or_paths if isinstance(A_ , A_ ) else {self.split: path_or_paths}
__UpperCamelCase =_PACKAGED_DATASETS_MODULES['parquet'][1]
__UpperCamelCase =Parquet(
cache_dir=A_ , data_files=A_ , features=A_ , hash=A_ , **A_ , )
def _a ( self ) -> List[Any]:
# Build iterable dataset
if self.streaming:
__UpperCamelCase =self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
self.builder.download_and_prepare(
download_config=A_ , download_mode=A_ , verification_mode=A_ , base_path=A_ , num_proc=self.num_proc , )
__UpperCamelCase =self.builder.as_dataset(
split=self.split , verification_mode=A_ , in_memory=self.keep_in_memory )
return dataset
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_ , A_ = None , **A_ , ) -> List[Any]:
__UpperCamelCase =dataset
__UpperCamelCase =path_or_buf
__UpperCamelCase =batch_size or get_writer_batch_size(dataset.features )
__UpperCamelCase =parquet_writer_kwargs
def _a ( self ) -> int:
__UpperCamelCase =self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , 'wb+' ) as buffer:
__UpperCamelCase =self._write(file_obj=A_ , batch_size=A_ , **self.parquet_writer_kwargs )
else:
__UpperCamelCase =self._write(file_obj=self.path_or_buf , batch_size=A_ , **self.parquet_writer_kwargs )
return written
def _a ( self , A_ , A_ , **A_ ) -> int:
__UpperCamelCase =0
__UpperCamelCase =parquet_writer_kwargs.pop('path_or_buf' , A_ )
__UpperCamelCase =self.dataset.features.arrow_schema
__UpperCamelCase =pq.ParquetWriter(A_ , schema=A_ , **A_ )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , A_ ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
__UpperCamelCase =query_table(
table=self.dataset._data , key=slice(A_ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(A_ )
written += batch.nbytes
writer.close()
return written
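# The writer above streams Arrow tables into one Parquet file in row-group-sized
# batches. The same pattern with plain pyarrow, independent of the datasets
# internals (the file name here is arbitrary):
import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"id": list(range(10)), "text": [f"row {i}" for i in range(10)]})

writer = pq.ParquetWriter("example.parquet", schema=table.schema)
for offset in range(0, len(table), 4):
    writer.write_table(table.slice(offset, 4))
writer.close()

print(pq.read_table("example.parquet").num_rows)  # 10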
| 117 | 1 |
import random

from .binary_exp_mod import bin_exp_mod


# Probabilistic Miller-Rabin primality test: write n - 1 as d * 2**exp with d
# odd, then check `prec` random bases.
def is_prime_big(n, prec=1000):
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # floor division keeps d an integer for bin_exp_mod
        exp += 1
    # n - 1 = d * (2 ** exp), with d odd

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
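# A sanity check against trial division on small integers. bin_exp_mod(a, d, n)
# computes the same thing as Python's built-in pow(a, d, n), which this sketch
# uses so it stays self-contained; agreement is overwhelmingly likely rather
# than guaranteed, since the test is probabilistic.
import random


def trial_division(n):
    return n > 1 and all(n % i for i in range(2, int(n**0.5) + 1))


def miller_rabin(n, prec=30):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    d, exp = n - 1, 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    for _ in range(prec):
        b = pow(random.randint(2, n - 1), d, n)
        if b != 1:
            composite = True
            for _ in range(exp):
                if b == n - 1:
                    composite = False
                    break
                b = b * b % n
            if composite:
                return False
    return True


assert all(miller_rabin(i) == trial_division(i) for i in range(2, 1000))
print("agrees with trial division on 2..999")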
| 230 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A__ = logging.getLogger(__name__)
@dataclass
class a :
__lowerCAmelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__lowerCAmelCase : Optional[str] = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__lowerCAmelCase : bool = field(default=__lowerCamelCase , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class a :
__lowerCAmelCase : str = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
__lowerCAmelCase : int = field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__lowerCAmelCase : bool = field(
default=__lowerCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _lowerCAmelCase ( ) -> Dict:
"""simple docstring"""
snake_case__ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case__ , snake_case__ , snake_case__ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case__ , snake_case__ , snake_case__ : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
snake_case__ : int = import_module('''tasks''' )
try:
snake_case__ : Optional[int] = getattr(__lowerCAmelCase , model_args.task_type )
snake_case__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
snake_case__ : Optional[int] = token_classification_task.get_labels(data_args.labels )
snake_case__ : Dict[int, str] = dict(enumerate(__lowerCAmelCase ) )
snake_case__ : Optional[Any] = len(__lowerCAmelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case__ : List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCAmelCase , id2label=__lowerCAmelCase , label2id={label: i for i, label in enumerate(__lowerCAmelCase )} , cache_dir=model_args.cache_dir , )
snake_case__ : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
snake_case__ : Tuple = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , )
# Get datasets
snake_case__ : Dict = (
TokenClassificationDataset(
token_classification_task=__lowerCAmelCase , data_dir=data_args.data_dir , tokenizer=__lowerCAmelCase , labels=__lowerCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
snake_case__ : Optional[Any] = (
TokenClassificationDataset(
token_classification_task=__lowerCAmelCase , data_dir=data_args.data_dir , tokenizer=__lowerCAmelCase , labels=__lowerCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(__lowerCAmelCase , __lowerCAmelCase ) -> Tuple[List[int], List[int]]:
snake_case__ : Any = np.argmax(__lowerCAmelCase , axis=2 )
snake_case__ , snake_case__ : List[Any] = preds.shape
snake_case__ : List[Any] = [[] for _ in range(__lowerCAmelCase )]
snake_case__ : int = [[] for _ in range(__lowerCAmelCase )]
for i in range(__lowerCAmelCase ):
for j in range(__lowerCAmelCase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(__lowerCAmelCase ) -> Dict:
snake_case__ , snake_case__ : List[str] = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(__lowerCAmelCase , __lowerCAmelCase ),
"precision": precision_score(__lowerCAmelCase , __lowerCAmelCase ),
"recall": recall_score(__lowerCAmelCase , __lowerCAmelCase ),
"f1": fa_score(__lowerCAmelCase , __lowerCAmelCase ),
}
# Data collator
snake_case__ : Any = DataCollatorWithPadding(__lowerCAmelCase , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
snake_case__ : Tuple = Trainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , compute_metrics=__lowerCAmelCase , data_collator=__lowerCAmelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case__ : Any = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
snake_case__ : Tuple = trainer.evaluate()
snake_case__ : Optional[Any] = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCAmelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __lowerCAmelCase , __lowerCAmelCase )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__lowerCAmelCase )
# Predict
if training_args.do_predict:
snake_case__ : Optional[Any] = TokenClassificationDataset(
token_classification_task=__lowerCAmelCase , data_dir=data_args.data_dir , tokenizer=__lowerCAmelCase , labels=__lowerCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
snake_case__ , snake_case__ , snake_case__ : List[str] = trainer.predict(__lowerCAmelCase )
snake_case__ , snake_case__ : int = align_predictions(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ : Optional[int] = os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCAmelCase , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , __lowerCAmelCase , __lowerCAmelCase )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
snake_case__ : Any = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCAmelCase , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return results
def _lowerCAmelCase ( __lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
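# The align_predictions helper above is the core of token-classification
# evaluation: argmax the logits, then drop every position whose label id equals
# the CrossEntropyLoss ignore index (-100). A toy run on hand-made arrays
# (label names are made up for the example):
import numpy as np

label_map = {0: "O", 1: "B-PER", 2: "I-PER"}
ignore_index = -100  # nn.CrossEntropyLoss().ignore_index

predictions = np.array([[[0.9, 0.05, 0.05], [0.1, 0.8, 0.1], [0.2, 0.2, 0.6]]])
label_ids = np.array([[0, 1, ignore_index]])  # last position is padding/subword

preds = np.argmax(predictions, axis=2)
batch_size, seq_len = preds.shape

out_label_list = [[] for _ in range(batch_size)]
preds_list = [[] for _ in range(batch_size)]
for i in range(batch_size):
    for j in range(seq_len):
        if label_ids[i, j] != ignore_index:
            out_label_list[i].append(label_map[label_ids[i][j]])
            preds_list[i].append(label_map[preds[i][j]])

print(preds_list, out_label_list)  # [['O', 'B-PER']] [['O', 'B-PER']]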
| 230 | 1 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
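# A short usage sketch (assuming the renamed classes above): the default config
# reproduces the base checkpoint's dimensions, and the ONNX config exposes the
# dynamic input axes.
config = XLMRobertaConfig()
print(config.model_type, config.vocab_size, config.hidden_size)  # xlm-roberta 30522 768

onnx_config = XLMRobertaOnnxConfig(config)
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'})])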
| 362 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"

# Build
from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer

mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-de
| 231 | 0 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def _UpperCamelCase ( lowercase__ ):
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
__lowerCAmelCase : Optional[Any] ='\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class _lowercase ( A__ ):
'''simple docstring'''
@staticmethod
def __magic_name__( lowerCAmelCase__ :ArgumentParser ) -> Any:
__SCREAMING_SNAKE_CASE : List[Any] = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=lowerCAmelCase__ , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=lowerCAmelCase__ )
def __init__( self :int , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :str , *lowerCAmelCase__ :List[Any] , ) -> Dict:
__SCREAMING_SNAKE_CASE : str = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(f'''Loading model {model_type}''' )
__SCREAMING_SNAKE_CASE : Optional[int] = model_type
__SCREAMING_SNAKE_CASE : str = tf_checkpoint
__SCREAMING_SNAKE_CASE : Optional[int] = pytorch_dump_output
__SCREAMING_SNAKE_CASE : str = config
__SCREAMING_SNAKE_CASE : Optional[int] = finetuning_task_name
def __magic_name__( self :int ) -> List[str]:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCAmelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCAmelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCAmelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(lowerCAmelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCAmelCase__ )
if "ckpt" in self._tf_checkpoint.lower():
__SCREAMING_SNAKE_CASE : Dict = self._tf_checkpoint
__SCREAMING_SNAKE_CASE : int = ''''''
else:
__SCREAMING_SNAKE_CASE : int = self._tf_checkpoint
__SCREAMING_SNAKE_CASE : List[Any] = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
lowerCAmelCase__ , self._config , self._pytorch_dump_output , lowerCAmelCase__ )
elif self._model_type == "gpt2":
try:
from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
convert_gpt2_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCAmelCase__ )
convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCAmelCase__ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
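# The command classes in this file all follow the same argparse wiring: a
# sub-parser whose `func` default builds the command object. A framework-free
# sketch of that pattern (names below are illustrative, not the transformers API):
from argparse import ArgumentParser


class EchoCommand:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("echo", help="Print the given checkpoint path.")
        sub.add_argument("--checkpoint", type=str, required=True)
        sub.set_defaults(func=lambda args: EchoCommand(args.checkpoint))

    def __init__(self, checkpoint):
        self._checkpoint = checkpoint

    def run(self):
        print(f"would convert the checkpoint at {self._checkpoint}")


parser = ArgumentParser("demo-cli")
subparsers = parser.add_subparsers()
EchoCommand.register_subcommand(subparsers)

args = parser.parse_args(["echo", "--checkpoint", "model.ckpt"])
args.func(args).run()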
| 9 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowerCamelCase__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase__ ( *UpperCamelCase : List[str] , **UpperCamelCase : Any ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__a = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
__UpperCAmelCase : Tuple = [
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
]
return object_detector, examples
def lowerCamelCase__ ( self : int , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = object_detector(examples[0] , threshold=0.0 )
__UpperCAmelCase : Tuple = len(UpperCamelCase )
self.assertGreater(UpperCamelCase , 0 )
self.assertEqual(
UpperCamelCase , [
{
"""score""": ANY(UpperCamelCase ),
"""label""": ANY(UpperCamelCase ),
"""box""": {"""xmin""": ANY(UpperCamelCase ), """ymin""": ANY(UpperCamelCase ), """xmax""": ANY(UpperCamelCase ), """ymax""": ANY(UpperCamelCase )},
}
for i in range(UpperCamelCase )
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
@require_torch
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
__UpperCAmelCase : Tuple = object_detector(
"""./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
        outputs = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"],)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
        outputs = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def test_large_model_tf(self):
        pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], threshold=threshold,)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], top_k=top_k,)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
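# Minimal standalone sketch of the pipeline exercised by the tests above (not part
# of the test suite; the checkpoint resolved for the default
# "zero-shot-object-detection" task and the 0.2 threshold are illustrative
# assumptions taken from test_threshold).
if __name__ == "__main__":
    detector = pipeline("zero-shot-object-detection")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote", "couch"],
        threshold=0.2,
    )
    for pred in predictions:
        # each prediction is a dict: {"score": float, "label": str, "box": {"xmin": ..., ...}}
        print(pred["label"], pred["score"], pred["box"])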
| 115 | 0 |
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask:
    """Counts the ways to distribute N distinct tasks among M persons, where each
    person may only perform tasks from a given list and gets exactly one task."""

    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask equals final_mask, every person has been assigned a task: one valid way
        if mask == self.final_mask:
            return 1
        # if not everyone got a task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if this case was already computed, return the memoized value
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # number of ways when we don't include this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the task to every possible person and recursively
        # assign the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p, update the mask, and recurse
                # with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table; the final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
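    # for this input there are 10 valid assignments, so the script prints 10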
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
) | 356 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
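# e.g. extract_label("images/Abyssinian_12.jpg") -> "Abyssinian"; file names are
# assumed to follow the "<label>_<index>.jpg" convention of the pets data folder.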
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
# Initialize accelerator
if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""")
    else:
        checkpointing_steps = None
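    # e.g. `--checkpointing_steps 500` saves a state every 500 optimizer steps,
    # while `--checkpointing_steps epoch` saves once at the end of each epoch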
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
# Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
# Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
# Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCAmelCase : int = create_model("resnet50d" ,pretrained=__snake_case ,num_classes=len(__snake_case ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
# Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
# We normalize the batches of images to be a bit faster.
__lowerCAmelCase : Optional[Any] = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
__lowerCAmelCase : int = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
# Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
# We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
            accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
# Now we train the model
    for epoch in range(starting_epoch, num_epochs):
model.train()
if args.with_tracking:
            total_loss = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
            accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = F"""step_{overall_step}"""
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
model.eval()
    accurate = 0
    num_elems = 0
    for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
        batch = {k: v.to(accelerator.device) for k, v in batch.items()}
        inputs = (batch["image"] - mean) / std
        with torch.no_grad():
            outputs = model(inputs)
        predictions = outputs.argmax(dim=-1)
        predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
        accurate_preds = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
    eval_metric = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(__snake_case ),
"epoch": epoch,
            } ,step=epoch ,)
if checkpointing_steps == "epoch":
            output_dir = F"""epoch_{epoch}"""
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument("--data_dir" ,required=__snake_case ,help="The data folder on disk." )
parser.add_argument("--fp16" ,action="store_true" ,help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision" ,type=__snake_case ,default=__snake_case ,choices=["no", "fp16", "bf16", "fp8"] ,help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." ,)
parser.add_argument("--cpu" ,action="store_true" ,help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps" ,type=__snake_case ,default=__snake_case ,help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." ,)
parser.add_argument(
"--output_dir" ,type=__snake_case ,default="." ,help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." ,)
parser.add_argument(
"--resume_from_checkpoint" ,type=__snake_case ,default=__snake_case ,help="If the training should continue from a checkpoint folder." ,)
parser.add_argument(
"--with_tracking" ,action="store_true" ,help="Whether to load in all available experiment trackers from the environment and use them for logging." ,)
parser.add_argument(
"--project_dir" ,type=__snake_case ,default="logs" ,help="Location on where to store experiment tracking logs` and relevent project information" ,)
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main() | 58 | 0 |
"""
Project Euler Problem 57: https://projecteuler.net/problem=57

In the first `n` expansions of the continued fraction for sqrt(2), count the
fractions whose numerator has more digits than the denominator.
"""


def solution(n: int = 1000) -> int:
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
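# Sanity check from the problem statement: the eighth expansion, 1393/985, is the
# first whose numerator has more digits than the denominator, so solution(8) == 1.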
if __name__ == "__main__":
print(f"""{solution() = }""")
| 267 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
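# e.g. _convert_yes_no_to_bool("YES") -> True, _convert_yes_no_to_bool("no") -> False
# (matching is case-insensitive; any other answer raises a KeyError)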
| 267 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_efficientformer': [
        'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EfficientFormerConfig',
    ]
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_efficientformer'] = ['EfficientFormerImageProcessor']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_efficientformer'] = [
        'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EfficientFormerForImageClassification',
        'EfficientFormerForImageClassificationWithTeacher',
        'EfficientFormerModel',
        'EfficientFormerPreTrainedModel',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_efficientformer'] = [
        'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFEfficientFormerForImageClassification',
        'TFEfficientFormerForImageClassificationWithTeacher',
        'TFEfficientFormerModel',
        'TFEfficientFormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
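# With the lazy structure above, importing a symbol such as EfficientFormerModel
# from this package only triggers the underlying torch/TF module import on first
# attribute access through the _LazyModule proxy.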
| 361 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=10_24, num_of_sequences=10_24, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)['''content'''])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)['''input_ids''']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {'''streaming''': True}
    valid_data = load_dataset(args.dataset_name, split='''train''', **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float('''inf''')
    return loss.item(), perplexity.item()
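# Perplexity is exp(mean cross-entropy loss) over the evaluation set; lower is better.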
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(F'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 179 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
snake_case__ : Optional[Any] = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
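# e.g. for a tensor with static shape (None, 128), shape_list returns
# [<scalar tensor holding the dynamic batch size>, 128]; known dims stay Python ints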
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # Adding a tiny epsilon works around an XLA-on-CPU issue in tf.nn.softmax;
    # since softmax(x) == softmax(x + c), the output is numerically unchanged.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def _snake_case ( _snake_case : List[str] , _snake_case : Dict , _snake_case : Optional[int] , _snake_case : Any=1E-5 , _snake_case : Union[str, Any]=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_snake_case , _snake_case ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
lowerCAmelCase, lowerCAmelCase : Tuple = tf.nn.moments(_snake_case , axes=[axis] , keepdims=_snake_case )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowerCAmelCase : Any = [1] * inputs.shape.rank
lowerCAmelCase : Optional[Any] = shape_list(_snake_case )[axis]
lowerCAmelCase : Union[str, Any] = tf.reshape(_snake_case , _snake_case )
lowerCAmelCase : Tuple = tf.reshape(_snake_case , _snake_case )
# Compute layer normalization using the batch_normalization
# function.
lowerCAmelCase : Dict = tf.nn.batch_normalization(
_snake_case , _snake_case , _snake_case , offset=_snake_case , scale=_snake_case , variance_epsilon=_snake_case , )
return outputs
def _snake_case ( _snake_case : Dict , _snake_case : Optional[int]=0 , _snake_case : Tuple=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowerCAmelCase : Optional[Any] = tf.shape(_snake_case )
lowerCAmelCase : Tuple = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
lowerCAmelCase : str = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(_snake_case , _snake_case )
def _snake_case ( _snake_case : tf.Tensor ):
if not isinstance(_snake_case , tf.Tensor ):
lowerCAmelCase : Any = tf.convert_to_tensor(_snake_case ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowerCAmelCase : Optional[int] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowerCAmelCase : int = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
lowerCAmelCase : Any = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def _snake_case ( _snake_case : tf.Tensor , _snake_case : int , _snake_case : str = "input_ids" ):
tf.debugging.assert_less(
_snake_case , tf.cast(_snake_case , dtype=tensor.dtype ) , message=(
f'''The maximum value of {tensor_name} ({tf.math.reduce_max(_snake_case )}) must be smaller than the embedding '''
f'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
def _snake_case ( _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : str ):
    HDF5_OBJECT_HEADER_LIMIT = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
lowerCAmelCase : Optional[Any] = [x for x in data if len(_snake_case ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
f'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
f'''bytes: {bad_attributes}''' )
lowerCAmelCase : Optional[int] = np.asarray(_snake_case )
lowerCAmelCase : Tuple = 1
lowerCAmelCase : Optional[Any] = np.array_split(_snake_case , _snake_case )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
lowerCAmelCase : Optional[int] = np.array_split(_snake_case , _snake_case )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(_snake_case ):
lowerCAmelCase : int = chunk_data
else:
lowerCAmelCase : List[Any] = data
def _snake_case ( _snake_case : str , _snake_case : Union[str, Any] ):
if name in group.attrs:
lowerCAmelCase : Union[str, Any] = [n.decode('''utf8''' ) if hasattr(_snake_case , '''decode''' ) else n for n in group.attrs[name]]
else:
lowerCAmelCase : Dict = []
lowerCAmelCase : List[str] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(_snake_case , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def _snake_case ( _snake_case : Optional[int] ):
def _expand_single_ad_tensor(_snake_case : List[str] ):
if isinstance(_snake_case , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(_snake_case , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , _snake_case )
| 60 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
UpperCAmelCase_ = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
UpperCAmelCase_ = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
UpperCAmelCase_ = '\nCalculates how good predictions are given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidate should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n    timeout:\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric("code_eval")\n    >>> test_cases = ["assert add(2,3)==5"]\n    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
UpperCAmelCase_ = '\n################################################################################\n    !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
UpperCAmelCase_ = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str]=[1, 10, 1_00] , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Any=3.0 ):
"""simple docstring"""
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=_UpperCAmelCase ) as executor:
UpperCAmelCase__ = []
UpperCAmelCase__ = Counter()
UpperCAmelCase__ = 0
UpperCAmelCase__ = defaultdict(_UpperCAmelCase )
for task_id, (candidates, test_case) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase ) ):
for candidate in candidates:
UpperCAmelCase__ = candidate + """\n""" + test_case
UpperCAmelCase__ = (test_program, timeout, task_id, completion_id[task_id])
UpperCAmelCase__ = executor.submit(_UpperCAmelCase , *_UpperCAmelCase )
futures.append(_UpperCAmelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(_UpperCAmelCase ):
UpperCAmelCase__ = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
UpperCAmelCase__ , UpperCAmelCase__ = [], []
for result in results.values():
result.sort()
UpperCAmelCase__ = [r[1]["""passed"""] for r in result]
total.append(len(_UpperCAmelCase ) )
correct.append(sum(_UpperCAmelCase ) )
UpperCAmelCase__ = np.array(_UpperCAmelCase )
UpperCAmelCase__ = np.array(_UpperCAmelCase )
UpperCAmelCase__ = k
UpperCAmelCase__ = {f'''pass@{k}''': estimate_pass_at_k(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    '''Estimates pass@k of each problem and returns them in an array.'''

    def estimator(n: int, c: int, k: int) -> float:
        '''Calculates 1 - comb(n - c, k) / comb(n, k).'''
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
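# Worked example for the estimator above: with n=5 samples of which c=1 passes,
# pass@1 = 1 - (1 - 1/5) = 0.2, the chance that one random draw is a passing sample.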
| 346 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = '''▁'''
snake_case__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
snake_case__ = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
snake_case__ = {
'''facebook/nllb-200-distilled-600M''': 10_24,
}
# fmt: off
snake_case__ = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class UpperCamelCase_ (lowerCamelCase__ ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = ['input_ids', 'attention_mask']
_lowerCAmelCase = []
_lowerCAmelCase = []
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : int="<s>" , _lowerCamelCase : List[str]="</s>" , _lowerCamelCase : Optional[Any]="</s>" , _lowerCamelCase : Optional[int]="<s>" , _lowerCamelCase : Any="<unk>" , _lowerCamelCase : Tuple="<pad>" , _lowerCamelCase : Tuple="<mask>" , _lowerCamelCase : Dict=None , _lowerCamelCase : int=None , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Union[str, Any]=False , **_lowerCamelCase : Any , ):
"""simple docstring"""
A_ : int = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
A_ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
A_ : Any = legacy_behaviour
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , tokenizer_file=_lowerCamelCase , src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_lowerCamelCase , **_lowerCamelCase , )
A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCamelCase ) )
A_ : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
A_ : Union[str, Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
A_ : Dict = 1
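        # e.g. the piece "an" has spm id 3 but fairseq id 4 = 3 + fairseq_offset (see table above)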
A_ : Dict = len(self.sp_model )
A_ : int = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_lowerCamelCase )
}
A_ : Optional[Any] = {v: k for k, v in self.lang_code_to_id.items()}
A_ : Union[str, Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
A_ : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
A_ : Union[str, Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
A_ : Dict = src_lang if src_lang is not None else "eng_Latn"
A_ : List[str] = self.lang_code_to_id[self._src_lang]
A_ : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : List[Any] ):
"""simple docstring"""
A_ : Optional[Any] = self.__dict__.copy()
A_ : Union[str, Any] = None
A_ : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : str , _lowerCamelCase : List[str] ):
"""simple docstring"""
A_ : Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
A_ : Tuple = {}
A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _a ( self : int ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _a ( self : Dict , _lowerCamelCase : str ):
"""simple docstring"""
A_ : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self : Dict , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
A_ : int = [1] * len(self.prefix_tokens )
A_ : Any = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
def _a ( self : List[str] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self : Optional[int] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
A_ : str = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : Optional[str] , _lowerCamelCase : Optional[str] , **_lowerCamelCase : Any ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
A_ : str = src_lang
A_ : str = self(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
A_ : Any = self.convert_tokens_to_ids(_lowerCamelCase )
A_ : Optional[int] = tgt_lang_id
return inputs
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Union[str, Any] = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self : Optional[int] , _lowerCamelCase : str ):
"""simple docstring"""
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def _a ( self : List[str] , _lowerCamelCase : List[Any] ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
A_ : Dict = self.sp_model.PieceToId(_lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _a ( self : Union[str, Any] , _lowerCamelCase : Dict ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _a ( self : Optional[Any] , _lowerCamelCase : Dict ):
"""simple docstring"""
A_ : str = "".join(_lowerCamelCase ).replace(_lowerCamelCase , ''' ''' ).strip()
return out_string
def _a ( self : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_lowerCamelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : List[str] = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
A_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
def _a ( self : Tuple ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _a ( self : Optional[Any] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language setting."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            # legacy: no prefix, suffix = [eos, src_lang_code]
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            # prefix = [src_lang_code], suffix = [eos]
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting, mirroring the source-language logic."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
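# Usage sketch (a sketch under assumptions, not part of the original file): the class above
# matches the NLLB tokenizer, so a checkpoint such as "facebook/nllb-200-distilled-600M"
# would exercise the prefix/suffix logic like this:
#
# tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
# enc = tokenizer("Hello world", return_tensors="pt")
# # non-legacy mode: input_ids = [eng_Latn code, ..., eos]; legacy mode: [..., eos, eng_Latn code]
# fra_id = tokenizer.convert_tokens_to_ids("fra_Latn")  # used as forced_bos_token_id when generating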
| 353 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class to store the configuration of a Swin model."""

    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
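# A short sketch (editorial, not from the original file) of the derived attributes set in __init__:
#
# config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
# assert config.hidden_size == 96 * 2 ** 3        # 768: channel dim after the last stage
# assert config.stage_names[:2] == ["stem", "stage1"]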
| 4 | 0 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """Weighted, undirected graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph, registering both endpoints as vertices."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm: grow a minimum spanning tree from an arbitrary start vertex."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            # any value strictly greater than every edge weight works as a sentinel
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """
    Find the maximum saving achievable by removing redundant edges while keeping
    the network connected: the cost of the full network minus the cost of its
    minimum spanning tree.
    """
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]
    for edge_b in range(1, len(adjacency_matrix)):
        for edge_a in range(edge_b):
            if adjacency_matrix[edge_b][edge_a] != "-":
                edges[(edge_a, edge_b)] = int(adjacency_matrix[edge_b][edge_a])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
| 11 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
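# A quick sketch (editorial, not in the original file) of how `feed_forward_proj` is parsed above:
#
# config = T5Config(feed_forward_proj="gated-silu")
# assert config.is_gated_act and config.dense_act_fn == "silu"
# config = T5Config(feed_forward_proj="gated-gelu")
# assert config.dense_act_fn == "gelu_new"  # backwards-compatibility remap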
| 7 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os

# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=True,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
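# Illustration (editorial sketch, not part of the original tests) of
# shift_tokens_right(input_ids, pad_token_id=1, decoder_start_token_id=2):
#
# ids     = [[71, 82, 18, 33, 2, 1, 1]]   # 2 = eos, 1 = pad
# shifted = [[ 2, 71, 82, 18, 33, 2, 1]]  # start token prepended, everything shifted right
# # one pad position is consumed, which is exactly what the test above asserts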
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS).sequences
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 151 |
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 151 | 1 |
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
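# Usage sketch (editorial addition, not part of the original module):
#
# require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")   # range with multiple constraints
# require_version("python>=3.8")                          # special-cased interpreter check
# require_version_core("numpy")                           # bare package name: presence check only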
| 323 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = [2, 2, 6, 2]
        num_heads = [3, 6, 12, 24]
    elif "small" in model_name:
        embed_dim = 96
        depths = [2, 2, 18, 2]
        num_heads = [3, 6, 12, 24]
    elif "base" in model_name:
        embed_dim = 128
        depths = [2, 2, 18, 2]
        num_heads = [4, 8, 16, 32]
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = [2, 2, 18, 2]
        num_heads = [6, 12, 24, 48]
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x
def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
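# Sanity check (editorial sketch, not in the original script): the unfold-order helpers
# above are mutual inverses, which is what makes the downsample-weight fix reversible.
#
# x = torch.arange(8.0)
# assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(x)), x)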
| 323 | 1 |
"""simple docstring"""
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
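    # Example (editorial sketch): fetch today's quote instead of a random one.
    # pprint.pprint(quote_of_the_day())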
| 336 |
"""simple docstring"""
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 336 | 1 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )
def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
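# Editorial sketch (not in the original tests): local_binary_value packs the eight
# neighbour comparisons of a pixel into one byte, so every value written into
# lbp_image above lies in [0, 255]. For instance:
#
#     sample = imread("digital_image_processing/image_data/lena.jpg", 0)
#     assert 0 <= lbp.local_binary_value(sample, 0, 0) <= 255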
| 109 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Dict = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
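# Net effect of the lazy structure above: importing the package stays cheap, and
# a name like `DistilBertModel` is only materialized (pulling in its torch/tf/flax
# dependencies) on first attribute access through `_LazyModule.__getattr__`.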
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"
def load_notes_encoder(weights, model):
a__ : Dict =nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
    a__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
a__ : Union[str, Any] =nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        attention_weights = ly_weight["attention"]
a__ : Any =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
a__ : Optional[Any] =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
a__ : List[Any] =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
a__ : Tuple =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
a__ : Any =nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
a__ : Dict =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
a__ : Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
a__ : List[Any] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
a__ : int =nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder(weights, model):
a__ : List[str] =nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
    a__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
a__ : Any =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
a__ : Any =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
a__ : Union[str, Any] =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
a__ : Dict =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
a__ : int =nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
a__ : Tuple =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
a__ : Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
a__ : Tuple =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
a__ : Tuple =nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
a__ : List[str] =nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder(weights, model):
a__ : Tuple =nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
a__ : int =nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
    a__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
a__ : str =nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
a__ : Union[str, Any] =nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
a__ : Any =nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
a__ : Dict =ly_weight["self_attention"]
a__ : List[str] =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
a__ : str =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
a__ : List[str] =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
a__ : Tuple =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
a__ : Tuple =ly_weight["MultiHeadDotProductAttention_0"]
a__ : Tuple =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
a__ : Optional[int] =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
a__ : Any =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
a__ : str =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
a__ : List[str] =nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
a__ : List[Any] =nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
a__ : Any =nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
a__ : str =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
a__ : Any =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
a__ : List[str] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
a__ : Union[str, Any] =nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
a__ : List[Any] =nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main(args):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
    )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
    args = parser.parse_args()
main(args)
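# Illustrative invocation (the script name and paths are placeholders):
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path /path/to/base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion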
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length,
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
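# Sanity check from the Project Euler 145 statement: there are exactly 120
# reversible numbers below one thousand, i.e. solution(3) == 120.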
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """A variance-preserving (VP) SDE scheduler in the score-based generative modeling formulation."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2_000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
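# Minimal usage sketch (`score_model`, `x`, and `generator` are assumed to
# exist; the model should predict the score of the noised sample at time t):
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(num_inference_steps=1000)
#   for t in scheduler.timesteps:
#       score = score_model(x, t)
#       x, x_mean = scheduler.step_pred(score, x, t, generator=generator)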
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
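# Usage sketch:
# >>> harmonic_series(5)
# ['1', '1/2', '1/3', '1/4', '1/5']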
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
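# Usage sketch: in the (older) datasets task API, attaching this template to a
# dataset's info lets `dataset.prepare_for_task("automatic-speech-recognition")`
# cast and rename the matching columns; `column_mapping` above drives the renaming.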
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
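# Sanity check: the two smallest almost equilateral triangles with integral
# area (5-5-6 and 17-17-16, from the Project Euler 94 statement) have
# perimeters 16 and 50, so solution(16) == 16 and solution(50) == 66.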
if __name__ == "__main__":
print(F"""{solution() = }""")
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
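# Usage sketch (the checkpoint name is illustrative; the class name assumes this
# is the BLIP-2 style pairing of a BlipImageProcessor with an auto-resolved tokenizer):
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=image, text="Question: what is shown? Answer:", return_tensors="pt")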
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
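# f(x) = x**3 - 2*x - 5 has a single real root at approximately 2.0945514815,
# so the printed value should land very close to it (the loop stops once two
# successive iterates differ by less than 1e-5).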
def solution(limit: int = 1_000_000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
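# Sanity check from the Project Euler 72 statement: for d <= 8 there are
# exactly 21 reduced proper fractions, i.e. solution(8) == 21.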
if __name__ == "__main__":
print(f"""{solution() = }""")
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
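# Classic amicable pair from the problem statement: sum_of_divisors(220) == 284
# and sum_of_divisors(284) == 220, so both 220 and 284 are counted.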
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)
    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
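# Usage sketch (`unet`, `scheduler`, and `init_image` are assumed to exist):
# partially noise an image and denoise it back to see how much structure survives:
#   pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
#   images, timestep = pipe(init_image, strength=0.5, num_inference_steps=50, return_dict=False)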
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)
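# Usage sketch: sin(90.0) returns 1.0 and sin(30.0) returns 0.5 (to the default
# ten rounded decimal places); angles outside [-360, 360] are reduced first.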
if __name__ == "__main__":
__import__('''doctest''').testmod()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_A = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
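# Classic worked example: 8051 = 83 * 97, so pollard_rho(8051) should return
# one of those two factors (which one depends on the seed and step chosen).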
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"""num""",
type=int,
help="""The value to find a divisor of""",
)
parser.add_argument(
"""--attempts""",
type=int,
default=3,
help="""The number of attempts before giving up""",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"{args.num} is probably prime")
else:
        quotient = args.num // divisor
print(f"{args.num} = {divisor} * {quotient}")
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
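# Illustrative call (all paths are placeholders):
#   convert_tf_checkpoint_to_pytorch(
#       "mobilebert/mobilebert_variables.ckpt", "mobilebert/config.json", "pytorch_model.bin"
#   )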
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 12_80,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 2_24,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 12_80,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 2_40,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 14_08,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 2_60,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 15_36,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 3_00,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 17_92,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 3_80,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 20_48,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 4_56,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 23_04,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 5_28,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 25_60,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 6_00,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
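# Note on the permutes above: Keras stores standard conv kernels as
# (H, W, in_channels, out_channels) while PyTorch expects
# (out_channels, in_channels, H, W), hence permute(3, 2, 0, 1); depthwise
# kernels are stored as (H, W, channels, multiplier), hence permute(2, 3, 0, 1).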
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowercase_ = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30_522, type=int)
    args = parser.parse_args()
logger.info(f"""Loading data from {args.data_file}""")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(f"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
from __future__ import annotations
def CeilIndex(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the largest subsequence so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces its ceiling value inside tail
            tail[CeilIndex(tail, -1, length - 1, v[i])] = v[i]

    return length
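# Example: for [2, 5, 3, 7, 11, 8, 10, 13, 6] one longest strictly increasing
# subsequence is [2, 3, 7, 8, 10, 13], so
# longest_increasing_subsequence_length returns 6.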
if __name__ == "__main__":
import doctest
doctest.testmod()
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
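# Usage sketch (hypothetical call site, not part of this file): the loader is
# invoked lazily, once, before the first forward pass that needs the custom
# op; torch.utils.cpp_extension.load JIT-compiles the sources on first use and
# caches the built extension, so repeated calls are cheap.
#
#     MSDA = load_cuda_kernels()
#     output = MSDA.ms_deform_attn_forward(
#         value, spatial_shapes, level_start_index, sampling_locations, attention_weights, im2col_step
#     )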
| 67 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_efficientformer'''] = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_efficientformer'''] = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_efficientformer'''] = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
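# Note on the pattern above: at import time only the name lists are built;
# _LazyModule replaces this module in sys.modules, so e.g.
# `from transformers.models.efficientformer import EfficientFormerModel`
# only triggers the real (torch-gated) import on first attribute access.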
| 223 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
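# Shape sketch (illustrative): ids_tensor((2, 5), vocab_size=99) returns a
# (2, 5) int32 array of token ids in [0, 98]; random_attention_mask((2, 5))
# returns a 0/1 mask of the same shape whose last column is forced to 1, so
# every row attends to at least one token.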
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : List[Any] = self._get_input_ids_and_config()
__A : List[Any] = False
__A : str = max_length
__A : Tuple = 0
for model_class in self.all_generative_model_classes:
__A : Union[str, Any] = model_class(__lowerCamelCase )
__A : Any = model_class.__name__[4:] # Skip the "Flax" at the beginning
__A : Dict = getattr(__lowerCamelCase , __lowerCamelCase )
__A : Tuple = pt_model_class(__lowerCamelCase ).eval()
__A : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCamelCase , flax_model.params )
__A : str = flax_model.generate(__lowerCamelCase ).sequences
__A : List[Any] = pt_model.generate(torch.tensor(__lowerCamelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
__A : Any = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : Union[str, Any] = self._get_input_ids_and_config()
__A : List[str] = False
__A : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
__A : Optional[int] = model_class(__lowerCamelCase )
__A : Dict = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
__A : Union[str, Any] = jit(model.generate )
__A : str = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : int = self._get_input_ids_and_config()
__A : List[Any] = True
__A : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
__A : Dict = model_class(__lowerCamelCase )
__A : List[Any] = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
__A : List[Any] = jit(model.generate )
__A : List[str] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : List[Any] = self._get_input_ids_and_config()
__A : List[str] = False
__A : Any = max_length
__A : List[Any] = 2
for model_class in self.all_generative_model_classes:
__A : Tuple = model_class(__lowerCamelCase )
__A : Optional[Any] = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
__A : Union[str, Any] = jit(model.generate )
__A : Optional[int] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : Tuple = self._get_input_ids_and_config()
__A : str = False
__A : Union[str, Any] = max_length
__A : List[Any] = 2
__A : Any = 2
for model_class in self.all_generative_model_classes:
__A : List[str] = model_class(__lowerCamelCase )
__A : Optional[int] = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : Any = self._get_input_ids_and_config()
__A : Optional[Any] = True
__A : Union[str, Any] = max_length
__A : List[str] = 0.8
__A : List[str] = 10
__A : Union[str, Any] = 0.3
__A : Union[str, Any] = 1
__A : Optional[Any] = 8
__A : Dict = 9
for model_class in self.all_generative_model_classes:
__A : List[Any] = model_class(__lowerCamelCase )
__A : List[str] = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
__A : Any = jit(model.generate )
__A : str = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : Union[str, Any] = self._get_input_ids_and_config()
__A : Union[str, Any] = max_length
__A : List[str] = 1
__A : str = 8
__A : Any = 9
for model_class in self.all_generative_model_classes:
__A : Union[str, Any] = model_class(__lowerCamelCase )
__A : Tuple = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
__A : Optional[Any] = jit(model.generate )
__A : Optional[Any] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : int = self._get_input_ids_and_config()
__A : Optional[int] = max_length
__A : List[str] = 2
__A : List[Any] = 1
__A : Optional[Any] = 8
__A : str = 9
for model_class in self.all_generative_model_classes:
__A : Optional[int] = model_class(__lowerCamelCase )
__A : int = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
__A : str = jit(model.generate )
__A : Any = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : int = self._get_input_ids_and_config()
# pad attention mask on the left
__A : Dict = attention_mask.at[(0, 0)].set(0 )
__A : Optional[Any] = False
__A : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
__A : int = model_class(__lowerCamelCase )
__A : Union[str, Any] = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
__A : Optional[int] = jit(model.generate )
__A : Any = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : Optional[int] = self._get_input_ids_and_config()
# pad attention mask on the left
__A : str = attention_mask.at[(0, 0)].set(0 )
__A : List[Any] = True
__A : Any = max_length
for model_class in self.all_generative_model_classes:
__A : str = model_class(__lowerCamelCase )
__A : List[Any] = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
__A : List[str] = jit(model.generate )
__A : Optional[int] = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : Optional[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
__A : List[str] = attention_mask.at[(0, 0)].set(0 )
__A : Optional[int] = 2
__A : Dict = max_length
for model_class in self.all_generative_model_classes:
__A : Any = model_class(__lowerCamelCase )
__A : int = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
__A : str = jit(model.generate )
__A : str = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''')
        model = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')

        encoder_input_str = '''Hello world'''
        input_ids = tokenizer(encoder_input_str, return_tensors='''np''').input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, '''do_samples'''):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, '''foo'''):
            fake_model_kwargs = {'''foo''': '''bar'''}
            model.generate(input_ids, **fake_model_kwargs)
| 179 | 0 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Get the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the artifacts' content of the last completed workflow run of the scheduled (daily) CI."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
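# Usage sketch (hypothetical values): fetch the latest daily-CI reports into a
# local folder and read them as strings, keyed by artifact and file name.
#
#     reports = get_last_daily_ci_reports(
#         artifact_names=["ci_results"],  # assumed artifact name
#         output_dir="ci_reports",
#         token=os.environ.get("GITHUB_TOKEN"),
#     )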
| 352 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
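    # Note: the teacher layers [0, 2, 4, 7, 9, 11] are spread across BERT's 12
    # layers so the 6-layer student inherits low-, mid- and high-level
    # features; the copied tensors only initialize the student, the actual
    # distillation training happens in a separate script.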
| 276 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : List[str] =logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
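# Shape sketch: make_batched accepts a single image (-> [[img]]), one video as
# a list of frames (-> [frames]), or a list of videos (returned unchanged), so
# downstream code can always iterate `for video in videos: for frame in video`.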
class VivitImageProcessor(BaseImageProcessor):
    r"""
    Constructs a video image processor (resize, center crop, rescale with optional offset, normalize).
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 70 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """
    Computes F1 score and Exact Match for MultiRC predictions.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
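# Worked example (illustrative): two answers that belong to the same question
# and are both predicted correctly form one group with exact match 1 and
# per-question macro-F1 of 1.0, yielding
# {"exact_match": 1.0, "f1_m": 1.0, "f1_a": 1.0}, matching the multirc
# doctest in _KWARGS_DESCRIPTION above.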
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 4 | 0 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """
    Depth estimation pipeline using any model with a depth-estimation head.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode='bicubic', align_corners=False)
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype('uint8')
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict['predicted_depth'] = predicted_depth
        output_dict['depth'] = depth
        return output_dict
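# Usage sketch (model name is illustrative; any depth-estimation checkpoint works):
#
#     from transformers import pipeline
#
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     result["depth"].save("depth.png")  # PIL image; result["predicted_depth"] is the raw tensor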
| 351 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_x_clip'] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 119 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"


def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent), checkpoint_file=Path(xmod_checkpoint_path).name, _name="xmod_base", arch="xmod_base", task="multilingual_masked_lm", data_name_or_path=str(data_dir), bpe="sentencepiece", sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"), src_dict=str(data_dir / "dict.txt")
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias
# end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
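    # Invocation sketch (script name and paths are placeholders):
    #
    #     python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
    #         --xmod_checkpoint_path /path/to/model.pt \
    #         --pytorch_dump_folder_path ./xmod-base \
    #         --classification_head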
| 151 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    """
    Construct a Reformer tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", additional_special_tokens=[], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
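# Round-trip sketch (assumes a local SentencePiece model file):
#
#     tok = ReformerTokenizer(vocab_file="spiece.model")
#     ids = tok("Crime and Punishment")["input_ids"]
#     text = tok.decode(ids)  # typically recovers the input up to whitespace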
| 151 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ldm3d_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt, padding="max_length", max_length=ldm3d_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()  # note: slices rgb, matching the expected values below

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
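

# For context, a minimal usage sketch of the pipeline exercised by the tests
# above. It assumes the public "Intel/ldm3d" checkpoint and a CUDA device; the
# prompt is illustrative. `output.rgb` / `output.depth` are the same fields the
# tests assert on.
def _ldm3d_usage_sketch():
    from diffusers import StableDiffusionLDM3DPipeline

    pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to("cuda")
    output = pipe("a photograph of an astronaut riding a horse", output_type="numpy")
    rgb, depth = output.rgb, output.depth  # rgb: (1, 512, 512, 3), depth: (1, 512, 512)
    return rgb, depth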
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (all divisors except n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i  # perfect square: count the square root only once
    return total - n


def solution(limit: int = 10000) -> int:
    """Return the sum of all amicable numbers below `limit` (Project Euler problem 21)."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
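    # Quick sanity check of the helper above on the classic amicable pair
    # (220, 284): the proper divisors of each number sum to the other.
    assert sum_of_divisors(220) == 284
    assert sum_of_divisors(284) == 220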