| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (81–54k chars) | int64 (0–721) | string (91–41.9k chars) | int64 (0–699) | int64 (0–1) |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class a :
def lowerCamelCase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: int =TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: str =UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: List[str] =DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=lowerCAmelCase , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Optional[Any] =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowerCamelCase__ ( self : str ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Any =TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Dict =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: List[Any] =UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: str =DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=lowerCAmelCase , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: List[Any] =DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Tuple =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowerCamelCase__ ( self : int ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =self.get_dummy_components()
SCREAMING_SNAKE_CASE_: str =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =inputs["""prompt"""]
SCREAMING_SNAKE_CASE_: Tuple =inputs["""generator"""]
SCREAMING_SNAKE_CASE_: Tuple =inputs["""num_inference_steps"""]
SCREAMING_SNAKE_CASE_: Tuple =inputs["""output_type"""]
if "image" in inputs:
SCREAMING_SNAKE_CASE_: List[str] =inputs["""image"""]
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] =None
if "mask_image" in inputs:
SCREAMING_SNAKE_CASE_: Optional[Any] =inputs["""mask_image"""]
else:
SCREAMING_SNAKE_CASE_: Tuple =None
if "original_image" in inputs:
SCREAMING_SNAKE_CASE_: List[str] =inputs["""original_image"""]
else:
SCREAMING_SNAKE_CASE_: Optional[int] =None
SCREAMING_SNAKE_CASE_: str =pipe.encode_prompt(lowerCAmelCase )
# inputs with prompt converted to embeddings
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
SCREAMING_SNAKE_CASE_: int =image
if mask_image is not None:
SCREAMING_SNAKE_CASE_: Optional[Any] =mask_image
if original_image is not None:
SCREAMING_SNAKE_CASE_: str =original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =pipe(**lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =self.pipeline_class.from_pretrained(lowerCAmelCase )
pipe_loaded.to(lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCAmelCase , lowerCAmelCase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
SCREAMING_SNAKE_CASE_: Optional[int] =self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =inputs["""generator"""]
SCREAMING_SNAKE_CASE_: List[str] =inputs["""num_inference_steps"""]
SCREAMING_SNAKE_CASE_: List[str] =inputs["""output_type"""]
# inputs with prompt converted to embeddings
SCREAMING_SNAKE_CASE_: Dict ={
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =image
if mask_image is not None:
SCREAMING_SNAKE_CASE_: List[str] =mask_image
if original_image is not None:
SCREAMING_SNAKE_CASE_: int =original_image
SCREAMING_SNAKE_CASE_: List[str] =pipe_loaded(**lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_: Optional[int] =np.abs(to_np(lowerCAmelCase ) - to_np(lowerCAmelCase ) ).max()
self.assertLess(lowerCAmelCase , 1E-4 )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_: Any =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =pipe(**lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =self.pipeline_class.from_pretrained(lowerCAmelCase )
pipe_loaded.to(lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
SCREAMING_SNAKE_CASE_: Tuple =self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =pipe_loaded(**lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_: List[Any] =np.abs(to_np(lowerCAmelCase ) - to_np(lowerCAmelCase ) ).max()
self.assertLess(lowerCAmelCase , 1E-4 )
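# A minimal usage sketch (hypothetical: `IFPipeline` and the subclass below are
# illustrative only, and a real subclass must also define `get_dummy_inputs`):
#
#     class IFPipelineFastTests(IFPipelineTesterMixin, unittest.TestCase):
#         pipeline_class = IFPipeline
#
#         def get_dummy_components(self):
#             return self._get_dummy_components()
#
#         def test_save_load_local(self):
#             self._test_save_load_local()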
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_UpperCAmelCase = state_dict["""cls.predictions.decoder.weight"""]
_UpperCAmelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
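# Illustrative invocation (script name and output path are assumptions; the
# flags come from the argparse definitions above):
#   python extract_distilbert.py --model_type bert --model_name bert-base-uncased \
#       --dump_checkpoint serialization_dir/distilbert_init.pth --vocab_transform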
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class a :
def __init__( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : List[str]=13 , lowerCAmelCase : Optional[Any]=7 , lowerCAmelCase : str=False , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Dict=False , lowerCAmelCase : Dict=19 , lowerCAmelCase : Dict=32 , lowerCAmelCase : Dict=5 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : Any=37 , lowerCAmelCase : List[Any]="gelu" , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=512 , lowerCAmelCase : List[Any]=16 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Dict=0.0_2 , lowerCAmelCase : int=3 , lowerCAmelCase : Optional[int]=4 , lowerCAmelCase : Optional[int]=None , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =parent
SCREAMING_SNAKE_CASE_: Tuple =batch_size
SCREAMING_SNAKE_CASE_: List[Any] =seq_length
SCREAMING_SNAKE_CASE_: int =is_training
SCREAMING_SNAKE_CASE_: Any =use_input_mask
SCREAMING_SNAKE_CASE_: List[str] =use_token_type_ids
SCREAMING_SNAKE_CASE_: List[Any] =use_labels
SCREAMING_SNAKE_CASE_: Tuple =vocab_size
SCREAMING_SNAKE_CASE_: Any =hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] =num_hidden_layers
SCREAMING_SNAKE_CASE_: Optional[int] =num_attention_heads
SCREAMING_SNAKE_CASE_: Optional[Any] =intermediate_size
SCREAMING_SNAKE_CASE_: List[Any] =hidden_act
SCREAMING_SNAKE_CASE_: Union[str, Any] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Optional[int] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[Any] =max_position_embeddings
SCREAMING_SNAKE_CASE_: Union[str, Any] =type_vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =type_sequence_label_size
SCREAMING_SNAKE_CASE_: Optional[Any] =initializer_range
SCREAMING_SNAKE_CASE_: Any =num_labels
SCREAMING_SNAKE_CASE_: Dict =num_choices
SCREAMING_SNAKE_CASE_: Any =scope
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_: Optional[int] =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_: Optional[Any] =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_: Any =None
SCREAMING_SNAKE_CASE_: Dict =None
SCREAMING_SNAKE_CASE_: int =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_: int =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_: Dict =ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_: List[Any] =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=lowerCAmelCase , esmfold_config={"""trunk""": {"""num_blocks""": 2}, """fp16_esm""": False} , )
return config
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =EsmForProteinFolding(config=lowerCAmelCase ).float()
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(lowerCAmelCase , attention_mask=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =model(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =model(lowerCAmelCase )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.prepare_config_and_inputs()
(
SCREAMING_SNAKE_CASE_
): Any =config_and_inputs
SCREAMING_SNAKE_CASE_: int ={"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : List[str] = False
UpperCamelCase : List[str] = (EsmForProteinFolding,) if is_torch_available() else ()
UpperCamelCase : List[str] = ()
UpperCamelCase : List[Any] = {} if is_torch_available() else {}
UpperCamelCase : Tuple = False
def lowerCamelCase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =EsmFoldModelTester(self )
SCREAMING_SNAKE_CASE_: Tuple =ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 )
def lowerCamelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
@unittest.skip("""Does not support attention outputs""" )
def lowerCamelCase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCamelCase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support passing input embeds!""" )
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self : str ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not output hidden states in the normal way.""" )
def lowerCamelCase__ ( self : Any ) -> str:
'''simple docstring'''
pass
@unittest.skip("""ESMfold does not output hidden states in the normal way.""" )
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold only has one output format.""" )
def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""" )
def lowerCamelCase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support input chunking.""" )
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support data parallel.""" )
def lowerCamelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch
class a ( UpperCAmelCase__ ):
@slow
def lowerCamelCase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =EsmForProteinFolding.from_pretrained("""facebook/esmfold_v1""" ).float()
model.eval()
SCREAMING_SNAKE_CASE_: str =torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
SCREAMING_SNAKE_CASE_: Optional[int] =model(lowerCAmelCase )["""positions"""]
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([2.5_8_2_8, 0.7_9_9_3, -10.9334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , lowerCAmelCase , atol=1E-4 ) )
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
return int((input_a, input_a).count(0 ) == 0 )
def __magic_name__ ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
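# Note on the pattern above: at import time the package replaces itself in
# sys.modules with a _LazyModule, so the torch-backed modeling submodules are
# only imported on first attribute access. The XLM and DPT packages later in
# this dump follow the same template.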
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger("""transformers.models.speecht5""")
def __magic_name__ ( lowercase , lowercase , lowercase ):
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""input_conv.weight_g"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[str] =checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: Union[str, Any] =checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[Any] =checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""output_conv.1.weight_g"""]
SCREAMING_SNAKE_CASE_: List[str] =checkpoint["""output_conv.1.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , ):
if config_path is not None:
SCREAMING_SNAKE_CASE_: List[Any] =SpeechTaHifiGanConfig.from_pretrained(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE_: Union[str, Any] =SpeechTaHifiGan(lowercase )
SCREAMING_SNAKE_CASE_: Any =torch.load(lowercase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =np.load(lowercase )
SCREAMING_SNAKE_CASE_: Any =stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE_: str =stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
model.save_pretrained(lowercase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
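# Illustrative invocation (file names are placeholders; flags match the parser above):
#   python convert_hifigan_checkpoint.py --checkpoint_path generator.ckpt \
#       --stats_path stats.npy --pytorch_dump_folder_path ./speecht5_hifigan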
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __magic_name__ ( lowercase ): # picklable for multiprocessing
return x.sum()
def __magic_name__ ( lowercase ): # picklable for multiprocessing
return i + 1
@dataclass
class a :
UpperCamelCase : int
UpperCamelCase : str
class a ( UpperCAmelCase__ ):
def lowerCamelCase__ ( self : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] ={}
SCREAMING_SNAKE_CASE_: Optional[Any] =[]
SCREAMING_SNAKE_CASE_: List[str] =1
SCREAMING_SNAKE_CASE_: List[Any] =[1, 2]
SCREAMING_SNAKE_CASE_: Optional[int] ={"""a""": 1, """b""": 2}
SCREAMING_SNAKE_CASE_: Tuple ={"""a""": [1, 2], """b""": [3, 4]}
SCREAMING_SNAKE_CASE_: Optional[Any] ={"""a""": {"""1""": 1}, """b""": 2}
SCREAMING_SNAKE_CASE_: List[str] ={"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
SCREAMING_SNAKE_CASE_: List[str] ={}
SCREAMING_SNAKE_CASE_: Dict =[]
SCREAMING_SNAKE_CASE_: Dict =2
SCREAMING_SNAKE_CASE_: Any =[2, 3]
SCREAMING_SNAKE_CASE_: Dict ={"""a""": 2, """b""": 3}
SCREAMING_SNAKE_CASE_: Any ={"""a""": [2, 3], """b""": [4, 5]}
SCREAMING_SNAKE_CASE_: str ={"""a""": {"""1""": 2}, """b""": 3}
SCREAMING_SNAKE_CASE_: str ={"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(map_nested(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(map_nested(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(map_nested(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(map_nested(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(map_nested(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(map_nested(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(map_nested(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =2
self.assertEqual(map_nested(lowerCAmelCase , lowerCAmelCase , num_proc=lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(map_nested(lowerCAmelCase , lowerCAmelCase , num_proc=lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(map_nested(lowerCAmelCase , lowerCAmelCase , num_proc=lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(map_nested(lowerCAmelCase , lowerCAmelCase , num_proc=lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(map_nested(lowerCAmelCase , lowerCAmelCase , num_proc=lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(map_nested(lowerCAmelCase , lowerCAmelCase , num_proc=lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(map_nested(lowerCAmelCase , lowerCAmelCase , num_proc=lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(map_nested(lowerCAmelCase , lowerCAmelCase , num_proc=lowerCAmelCase ) , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple ={"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
SCREAMING_SNAKE_CASE_: Tuple ={"""a""": 2, """b""": 0, """c""": 2}
SCREAMING_SNAKE_CASE_: str ={
"""a""": np.eye(2 ).astype(lowerCAmelCase ),
"""b""": np.zeros(3 ).astype(lowerCAmelCase ),
"""c""": np.ones(2 ).astype(lowerCAmelCase ),
}
self.assertEqual(map_nested(lowerCAmelCase , lowerCAmelCase , map_numpy=lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(lowerCAmelCase , lowerCAmelCase , map_numpy=lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(lowerCAmelCase , lowerCAmelCase , map_numpy=lowerCAmelCase , num_proc=lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(lowerCAmelCase , lowerCAmelCase , map_numpy=lowerCAmelCase , num_proc=lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(lowerCAmelCase ): # can't pickle a local lambda
map_nested(lambda lowerCAmelCase : x + 1 , lowerCAmelCase , num_proc=lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] ={"""a""": 1, """b""": 2}
SCREAMING_SNAKE_CASE_: int ={"""a""": 3, """b""": 4}
SCREAMING_SNAKE_CASE_: Any ={"""a""": 5, """b""": 6}
SCREAMING_SNAKE_CASE_: Optional[Any] =sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) ) , lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
class a :
UpperCamelCase : int = 'bar'
SCREAMING_SNAKE_CASE_: Union[str, Any] =Foo()
self.assertEqual(foo.my_attr , """bar""" )
with temporary_assignment(lowerCAmelCase , """my_attr""" , """BAR""" ):
self.assertEqual(foo.my_attr , """BAR""" )
self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def __magic_name__ ( lowercase , lowercase , lowercase ):
with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
"""datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
SCREAMING_SNAKE_CASE_: Dict ={f'''{i}''': i for i in range(lowercase )}
SCREAMING_SNAKE_CASE_: List[str] =map_nested(lambda lowercase : x + 10 , lowercase , num_proc=lowercase , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class a ( UpperCAmelCase__ ):
@require_tf
def lowerCamelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
import tensorflow as tf
from tensorflow.keras import layers
SCREAMING_SNAKE_CASE_: Dict =layers.Dense(2 )
def gen_random_output():
SCREAMING_SNAKE_CASE_: int =tf.random.uniform((1, 3) )
return model(lowerCAmelCase ).numpy()
with temp_seed(42 , set_tensorflow=lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =gen_random_output()
with temp_seed(42 , set_tensorflow=lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Any =gen_random_output()
SCREAMING_SNAKE_CASE_: str =gen_random_output()
np.testing.assert_equal(lowerCAmelCase , lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
import torch
def gen_random_output():
SCREAMING_SNAKE_CASE_: int =torch.nn.Linear(3 , 2 )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.rand(1 , 3 )
return model(lowerCAmelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =gen_random_output()
with temp_seed(42 , set_pytorch=lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[int] =gen_random_output()
SCREAMING_SNAKE_CASE_: str =gen_random_output()
np.testing.assert_equal(lowerCAmelCase , lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def lowerCamelCase__ ( self : Tuple ) -> str:
'''simple docstring'''
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =gen_random_output()
with temp_seed(42 ):
SCREAMING_SNAKE_CASE_: Dict =gen_random_output()
SCREAMING_SNAKE_CASE_: Optional[Any] =gen_random_output()
np.testing.assert_equal(lowerCAmelCase , lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" , [{}] )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =NestedDataStructure(lowercase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =NestedDataStructure(lowercase ).flatten()
assert output == expected_output
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: int =A(x=1 , y="""foobar""" )
SCREAMING_SNAKE_CASE_: List[str] ={"""x""": 1, """y""": """foobar"""}
assert asdict(lowercase ) == expected_output
SCREAMING_SNAKE_CASE_: Optional[Any] ={"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]}
SCREAMING_SNAKE_CASE_: List[str] ={"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
assert asdict(lowercase ) == expected_output
with pytest.raises(lowercase ):
asdict([1, A(x=10 , y="""foo""" )] )
def __magic_name__ ( lowercase ):
return text.split()
def __magic_name__ ( lowercase ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __magic_name__ ( ):
with Pool(2 ) as pool:
SCREAMING_SNAKE_CASE_: Any =list(iflatmap_unordered(lowercase , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(lowercase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
SCREAMING_SNAKE_CASE_: Union[str, Any] =list(iflatmap_unordered(lowercase , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(lowercase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
SCREAMING_SNAKE_CASE_: Optional[int] =[]
for yield_time, content in iflatmap_unordered(
lowercase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(lowercase )
assert out.count("""a""" ) == 2
assert out.count("""b""" ) == 2
assert len(lowercase ) == 4
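# Quick illustration of the main helper under test (mirrors the s6 assertion
# above, not additional coverage): map_nested applies a function to every leaf
# of a nested list/dict structure.
#   map_nested(add_one, {"a": [1, 2], "b": [3, 4]})  ->  {"a": [2, 3], "b": [4, 5]}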
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __magic_name__ ( lowercase ):
if "cls_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def __magic_name__ ( lowercase , lowercase ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: Optional[int] =orig_state_dict.pop(lowercase )
if "qkv" in key:
SCREAMING_SNAKE_CASE_: Dict =key.split(""".""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =int(key_split[1] )
if "decoder_blocks" in key:
SCREAMING_SNAKE_CASE_: int =config.decoder_hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] ="""decoder.decoder_layers."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Dict =val[:dim, :]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: str =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: List[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: List[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Any =config.hidden_size
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""vit.encoder.layer."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim, :]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: Dict =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Any =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Tuple =val
return orig_state_dict
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =ViTMAEConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: List[Any] =1024
SCREAMING_SNAKE_CASE_: Dict =4096
SCREAMING_SNAKE_CASE_: Tuple =24
SCREAMING_SNAKE_CASE_: int =16
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Union[str, Any] =14
SCREAMING_SNAKE_CASE_: Any =1280
SCREAMING_SNAKE_CASE_: Dict =5120
SCREAMING_SNAKE_CASE_: Optional[int] =32
SCREAMING_SNAKE_CASE_: Optional[Any] =16
SCREAMING_SNAKE_CASE_: Tuple =ViTMAEForPreTraining(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )["""model"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: str =convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple ="""https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
SCREAMING_SNAKE_CASE_: List[Any] =Image.open(requests.get(lowercase , stream=lowercase ).raw )
SCREAMING_SNAKE_CASE_: int =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: int =image_processor(images=lowercase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_: Optional[Any] =model(**lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =outputs.logits
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Dict =torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
SCREAMING_SNAKE_CASE_: Any =torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
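# Illustrative invocation (output directory is a placeholder; the default
# --checkpoint_url is the ViT-MAE base checkpoint declared above):
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base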
"""simple docstring"""
def __magic_name__ ( lowercase = 3 , lowercase = 7 , lowercase = 100_0000 ):
SCREAMING_SNAKE_CASE_: Optional[int] =0
SCREAMING_SNAKE_CASE_: Optional[Any] =1
for current_denominator in range(1 , limit + 1 ):
SCREAMING_SNAKE_CASE_: str =current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
SCREAMING_SNAKE_CASE_: str =current_numerator
SCREAMING_SNAKE_CASE_: List[Any] =current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
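# Worked example (checked by hand, not part of the original file): with
# limit=8 the candidate fractions just below 3/7 include 1/3, 2/6, 3/8 and 2/5;
# 2/5 is the largest of them, so solution(3, 7, 8) returns its numerator, 2.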
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_2,
bloom,
bridgetower,
byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextv2,
cpm,
cpmant,
ctrl,
cvt,
data2vec,
deberta,
deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmv2,
layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longt5,
luke,
lxmert,
m2m_100,
marian,
markuplm,
mask2former,
maskformer,
mbart,
mbart50,
mega,
megatron_bert,
megatron_gpt2,
mgp_str,
mluke,
mobilebert,
mobilenet_v1,
mobilenet_v2,
mobilevit,
mobilevitv2,
mpnet,
mra,
mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_2,
speecht5,
splinter,
squeezebert,
swiftformer,
swin,
swin2sr,
swinv2,
switch_transformers,
t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wav2vec2,
wav2vec2_conformer,
wav2vec2_phoneme,
wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
def odd_even_sort(input_list: list) -> list:
    """Odd-even transposition sort: alternately compare even- and odd-indexed
    neighbor pairs until a full pass makes no swaps."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]  # elements of the list on one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
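# Worked example (traced by hand): odd_even_sort([3, 1, 2]) swaps (3, 1) on the
# even-index pass and then (3, 2) on the odd-index pass, and a final pass with
# no swaps ends the loop, returning [1, 2, 3]. The list is sorted in place.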
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
"""simple docstring"""
def __magic_name__ ( lowercase ):
return str(lowercase ) == str(lowercase )[::-1]
def __magic_name__ ( lowercase ):
return int(lowercase ) + int(str(lowercase )[::-1] )
def __magic_name__ ( lowercase = 1_0000 ):
SCREAMING_SNAKE_CASE_: List[str] =[]
for num in range(1 , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =0
SCREAMING_SNAKE_CASE_: int =num
while iterations < 50:
SCREAMING_SNAKE_CASE_: Optional[Any] =sum_reverse(lowercase )
iterations += 1
if is_palindrome(lowercase ):
break
else:
lychrel_nums.append(lowercase )
return len(lowercase )
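# Hand-checkable example: 349 + 943 = 1292, 1292 + 2921 = 4213 and 4213 + 3124 = 7337,
# a palindrome reached in three iterations, so 349 is not a Lychrel candidate.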
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
class a ( UpperCAmelCase__ ):
def __init__( self : int , lowerCAmelCase : Tuple=None , **lowerCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""" , FutureWarning , )
super().__init__(args=lowerCAmelCase , **lowerCAmelCase )
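# Minimal usage sketch (hypothetical argument values, for illustration only): constructing
# the deprecated class, e.g. SageMakerTrainer(args=TrainingArguments(output_dir="out")),
# emits the FutureWarning once and otherwise behaves exactly like Trainer.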
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
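# Lazy-import scaffolding: the import structure is declared up front, optional branches are
# added only when their dependency check passes, and heavy submodules are loaded on first
# attribute access (or eagerly under TYPE_CHECKING).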
_UpperCAmelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""DPTFeatureExtractor"""]
_UpperCAmelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class a :
@staticmethod
def lowerCamelCase__ ( *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : str ) -> int:
'''simple docstring'''
pass
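# The stub class above stands in for PIL.Image so this module still imports when the
# vision extras are not installed.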
@is_pipeline_test
@require_torch
@require_vision
class a ( unittest.TestCase ):
UpperCamelCase : Tuple = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
SCREAMING_SNAKE_CASE_: List[str] =[
{
"""image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Any ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =vqa_pipeline(lowerCAmelCase , top_k=1 )
self.assertEqual(
lowerCAmelCase , [
[{"""score""": ANY(lowerCAmelCase ), """answer""": ANY(lowerCAmelCase )}],
[{"""score""": ANY(lowerCAmelCase ), """answer""": ANY(lowerCAmelCase )}],
] , )
@require_torch
def lowerCamelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
SCREAMING_SNAKE_CASE_: List[Any] ="""./tests/fixtures/tests_samples/COCO/000000039769.png"""
SCREAMING_SNAKE_CASE_: Tuple ="""How many cats are there?"""
SCREAMING_SNAKE_CASE_: int =vqa_pipeline(image=lowerCAmelCase , question="""How many cats are there?""" , top_k=2 )
self.assertEqual(
lowerCAmelCase , [{"""score""": ANY(lowerCAmelCase ), """answer""": ANY(lowerCAmelCase )}, {"""score""": ANY(lowerCAmelCase ), """answer""": ANY(lowerCAmelCase )}] )
SCREAMING_SNAKE_CASE_: List[str] =vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
lowerCAmelCase , [{"""score""": ANY(lowerCAmelCase ), """answer""": ANY(lowerCAmelCase )}, {"""score""": ANY(lowerCAmelCase ), """answer""": ANY(lowerCAmelCase )}] )
@slow
@require_torch
def lowerCamelCase__ ( self : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" )
SCREAMING_SNAKE_CASE_: List[Any] ="""./tests/fixtures/tests_samples/COCO/000000039769.png"""
SCREAMING_SNAKE_CASE_: Dict ="""How many cats are there?"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =vqa_pipeline(image=lowerCAmelCase , question=lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=4 ) , [{"""score""": 0.8_7_9_9, """answer""": """2"""}, {"""score""": 0.2_9_6, """answer""": """1"""}] )
SCREAMING_SNAKE_CASE_: Optional[int] =vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=4 ) , [{"""score""": 0.8_7_9_9, """answer""": """2"""}, {"""score""": 0.2_9_6, """answer""": """1"""}] )
SCREAMING_SNAKE_CASE_: Optional[int] =vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=4 ) , [[{"""score""": 0.8_7_9_9, """answer""": """2"""}, {"""score""": 0.2_9_6, """answer""": """1"""}]] * 2 , )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""" )
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
pass
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
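# A self-balancing AVL tree, plus a small array-backed queue used for level-order printing.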
class a :
def __init__( self : str ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: list[Any] =[]
SCREAMING_SNAKE_CASE_: int =0
SCREAMING_SNAKE_CASE_: int =0
def lowerCamelCase__ ( self : Optional[Any] ) -> bool:
'''simple docstring'''
return self.head == self.tail
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
self.data.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =self.tail + 1
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.data[self.head]
SCREAMING_SNAKE_CASE_: Optional[int] =self.head + 1
return ret
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.tail - self.head
def lowerCamelCase__ ( self : str ) -> None:
'''simple docstring'''
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =data
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: int =1
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return self.data
def lowerCamelCase__ ( self : List[Any] ) -> MyNode | None:
'''simple docstring'''
return self.left
def lowerCamelCase__ ( self : Dict ) -> MyNode | None:
'''simple docstring'''
return self.right
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
return self.height
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =data
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =node
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =node
def lowerCamelCase__ ( self : int , lowerCAmelCase : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =height
def __magic_name__ ( lowercase ):
if node is None:
return 0
return node.get_height()
def __magic_name__ ( lowercase , lowercase ):
if a > b:
return a
return b
def __magic_name__ ( lowercase ):
print("""left rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: int =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
print("""right rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowercase ) )
return right_rotation(lowercase )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowercase ) )
return left_rotation(lowercase )
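# The two double rotations above handle the zig-zag cases: left-right rotates the left
# child first and then the node itself; right-left is the mirror image.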
def __magic_name__ ( lowercase , lowercase ):
if node is None:
return MyNode(lowercase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowercase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
SCREAMING_SNAKE_CASE_: Union[str, Any] =node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
SCREAMING_SNAKE_CASE_: Any =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: List[Any] =lr_rotation(lowercase )
else:
node.set_right(insert_node(node.get_right() , lowercase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
SCREAMING_SNAKE_CASE_: Tuple =node.get_right()
assert right_child is not None
if data < right_child.get_data():
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[int] =left_rotation(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
return node
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: Dict =root.get_right()
if right_child is None:
break
SCREAMING_SNAKE_CASE_: str =right_child
return root.get_data()
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: str =root.get_left()
if left_child is None:
break
SCREAMING_SNAKE_CASE_: Dict =left_child
return root.get_data()
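# del_node below replaces a node that has two children with the left-most value of its
# right subtree (the in-order successor) and then removes that value recursively.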
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: str =root.get_left()
SCREAMING_SNAKE_CASE_: List[Any] =root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_left_most(lowercase )
root.set_data(lowercase )
root.set_right(del_node(lowercase , lowercase ) )
elif left_child is not None:
SCREAMING_SNAKE_CASE_: Optional[int] =left_child
elif right_child is not None:
SCREAMING_SNAKE_CASE_: Any =right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(lowercase , lowercase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowercase , lowercase ) )
if get_height(lowercase ) - get_height(lowercase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
SCREAMING_SNAKE_CASE_: Tuple =left_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
elif get_height(lowercase ) - get_height(lowercase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
SCREAMING_SNAKE_CASE_: Optional[Any] =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: str =lr_rotation(lowercase )
SCREAMING_SNAKE_CASE_: str =my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowercase )
return root
class a :
def __init__( self : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: MyNode | None =None
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
return get_height(self.root )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""insert:""" + str(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Tuple =insert_node(self.root , lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""delete:""" + str(lowerCAmelCase ) )
if self.root is None:
print("""Tree is empty!""" )
return
SCREAMING_SNAKE_CASE_: Union[str, Any] =del_node(self.root , lowerCAmelCase )
    def __str__( self : List[str] , ) -> str:  # a level-order traversal gives a more intuitive look at the tree
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =""""""
SCREAMING_SNAKE_CASE_: str =MyQueue()
q.push(self.root )
SCREAMING_SNAKE_CASE_: List[str] =self.get_height()
if layer == 0:
return output
SCREAMING_SNAKE_CASE_: int =0
while not q.is_empty():
SCREAMING_SNAKE_CASE_: int =q.pop()
SCREAMING_SNAKE_CASE_: List[Any] =""" """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(lowerCAmelCase )
q.push(lowerCAmelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
SCREAMING_SNAKE_CASE_: List[Any] =cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , lowerCAmelCase ) - 1:
SCREAMING_SNAKE_CASE_: int =layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def __magic_name__ ( ):
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
_UpperCAmelCase = AVLtree()
_UpperCAmelCase = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_lowerCAmelCase = parser.parse_args()
if args.model_type == "bert":
_lowerCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_lowerCAmelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_lowerCAmelCase = model.state_dict()
_lowerCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_lowerCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_lowerCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_lowerCAmelCase = 0
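# Copy 6 of the teacher's 12 encoder layers into the student: the indices below skip
# roughly every other layer, matching the DistilBERT initialisation recipe.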
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_lowerCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_lowerCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_lowerCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_lowerCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_lowerCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_lowerCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_lowerCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_lowerCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_lowerCAmelCase = state_dict["""cls.predictions.decoder.weight"""]
_lowerCAmelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_lowerCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_lowerCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
"""simple docstring"""
import string
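# Atbash cipher: every letter maps to its mirror in the alphabet (A<->Z, B<->Y, ...).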
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =""""""
for i in sequence:
SCREAMING_SNAKE_CASE_: List[Any] =ord(lowercase )
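        # ord("A") + ord("Z") == 155 and ord("a") + ord("z") == 219, so subtracting the
        # code point from these constants mirrors a letter within its own case.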
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =string.ascii_letters
SCREAMING_SNAKE_CASE_: Tuple =string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowercase )] if c in letters else c for c in sequence )
def __magic_name__ ( ):
from timeit import timeit
print("""Running performance benchmarks...""" )
SCREAMING_SNAKE_CASE_: int ="""from string import printable ; from __main__ import atbash, atbash_slow"""
print(f'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=lowercase )} seconds''' )
print(f'''> atbash(): {timeit("atbash(printable)" , setup=lowercase )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
_UpperCAmelCase = logging.get_logger(__name__)
class a ( UpperCAmelCase__ ):
UpperCamelCase : str = 'vision-encoder-decoder'
UpperCamelCase : Union[str, Any] = True
def __init__( self : Optional[Any] , **lowerCAmelCase : int ) -> int:
'''simple docstring'''
super().__init__(**lowerCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
                f'''A configuration of type {self.model_type} cannot be instantiated because '''
                f'''both `encoder` and `decoder` sub-configurations must be passed, but only {kwargs} was given.''' )
SCREAMING_SNAKE_CASE_: int =kwargs.pop("""encoder""" )
SCREAMING_SNAKE_CASE_: Optional[int] =encoder_config.pop("""model_type""" )
SCREAMING_SNAKE_CASE_: Union[str, Any] =kwargs.pop("""decoder""" )
SCREAMING_SNAKE_CASE_: str =decoder_config.pop("""model_type""" )
SCREAMING_SNAKE_CASE_: List[str] =AutoConfig.for_model(lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =AutoConfig.for_model(lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =True
@classmethod
def lowerCamelCase__ ( cls : Tuple , lowerCAmelCase : PretrainedConfig , lowerCAmelCase : PretrainedConfig , **lowerCAmelCase : Any ) -> PretrainedConfig:
'''simple docstring'''
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
SCREAMING_SNAKE_CASE_: Any =True
SCREAMING_SNAKE_CASE_: Optional[Any] =True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_: List[Any] =self.encoder.to_dict()
SCREAMING_SNAKE_CASE_: Tuple =self.decoder.to_dict()
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.__class__.model_type
return output
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = version.parse('1.11' )
@property
def lowerCamelCase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase__ ( self : int ) -> float:
'''simple docstring'''
return 1E-4
@property
def lowerCamelCase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class a ( UpperCAmelCase__ ):
@property
def lowerCamelCase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =OrderedDict()
SCREAMING_SNAKE_CASE_: Optional[Any] ={0: """batch""", 1: """past_decoder_sequence + sequence"""}
SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """past_decoder_sequence + sequence"""}
SCREAMING_SNAKE_CASE_: int ={0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : "PreTrainedTokenizerBase" , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
import torch
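        # Build the text decoder's dummy inputs first, then replace the encoder side with a
        # zero tensor shaped (batch, encoder_sequence, encoder_hidden_size).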
SCREAMING_SNAKE_CASE_: Optional[Any] =OrderedDict()
SCREAMING_SNAKE_CASE_: Dict =super().generate_dummy_inputs(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =dummy_input["""input_ids"""].shape
SCREAMING_SNAKE_CASE_: Any =(batch, encoder_sequence, self._config.encoder_hidden_size)
SCREAMING_SNAKE_CASE_: List[str] =dummy_input.pop("""input_ids""" )
SCREAMING_SNAKE_CASE_: Tuple =dummy_input.pop("""attention_mask""" )
SCREAMING_SNAKE_CASE_: Tuple =torch.zeros(lowerCAmelCase )
return common_inputs
class a ( UpperCAmelCase__ ):
@property
def lowerCamelCase__ ( self : Tuple ) -> None:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : str , lowerCAmelCase : PretrainedConfig ) -> OnnxConfig:
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(lowerCAmelCase )
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : PretrainedConfig , lowerCAmelCase : PretrainedConfig , lowerCAmelCase : str = "default" ) -> OnnxConfig:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(lowerCAmelCase , lowerCAmelCase )
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : List[str]=2 , lowerCAmelCase : int=3 , lowerCAmelCase : Optional[Any]=64 , lowerCAmelCase : Union[str, Any]=None ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =np.random.default_rng(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =length
SCREAMING_SNAKE_CASE_: Union[str, Any] =rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_: Tuple =a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[Any] ) -> str:
'''simple docstring'''
return self.length
def __getitem__( self : Union[str, Any] , lowerCAmelCase : Any ) -> List[str]:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class a ( torch.nn.Module ):
def __init__( self : Optional[int] , lowerCAmelCase : str=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Optional[int]=False ) -> Tuple:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: Dict =True
def lowerCamelCase__ ( self : str , lowerCAmelCase : Tuple=None ) -> int:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Union[str, Any] =False
return x * self.a[0] + self.b[0]
class a ( torch.nn.Module ):
def __init__( self : Union[str, Any] , lowerCAmelCase : Any=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : List[Any]=False ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[str] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: List[Any] =True
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : int=None ) -> Any:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Optional[int] =False
return x * self.a + self.b
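# Unlike the module above, this variant takes its initial (a, b) from the constructor, so
# tests can start from arbitrary parameters rather than the fixed values (2, 3).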
def __magic_name__ ( lowercase , lowercase = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE_: Optional[int] ={"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
SCREAMING_SNAKE_CASE_: Any =load_dataset("""csv""" , data_files=lowercase )
SCREAMING_SNAKE_CASE_: Any =datasets["""train"""].unique("""label""" )
SCREAMING_SNAKE_CASE_: List[Any] ={v: i for i, v in enumerate(lowercase )}
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: Dict =tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase , max_length=lowercase , padding="""max_length""" )
if "label" in examples:
SCREAMING_SNAKE_CASE_: Optional[int] =[label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_: List[Any] =datasets.map(
lowercase , batched=lowercase , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowercase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Optional[int] =DataLoader(tokenized_datasets["""train"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=2 )
SCREAMING_SNAKE_CASE_: Dict =DataLoader(tokenized_datasets["""validation"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=1 )
return train_dataloader, eval_dataloader
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self : Any , lowerCAmelCase : Any , lowerCAmelCase : List[str]=13 , lowerCAmelCase : Dict=3 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=224 , lowerCAmelCase : List[str]=1000 , lowerCAmelCase : Optional[Any]=[3, 3, 6, 4] , lowerCAmelCase : int=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =parent
SCREAMING_SNAKE_CASE_: Any =batch_size
SCREAMING_SNAKE_CASE_: Tuple =num_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] =is_training
SCREAMING_SNAKE_CASE_: Tuple =use_labels
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[Any] =num_labels
SCREAMING_SNAKE_CASE_: int =image_size
SCREAMING_SNAKE_CASE_: Optional[Any] =layer_depths
SCREAMING_SNAKE_CASE_: List[Any] =embed_dims
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: List[str] =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_: Tuple =self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1E-5 , )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Any =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.num_labels
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE_: int =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
(SCREAMING_SNAKE_CASE_): str =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_: Tuple ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[int] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCamelCase : Tuple = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Any = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Dict = False
UpperCamelCase : List[str] = False
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: int =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Any =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Tuple =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Optional[Any] =SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_: Optional[Any] =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: List[str] =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Dict =outputs.hidden_states
SCREAMING_SNAKE_CASE_: List[Any] =8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: Any =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
def _config_zero_init(lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_: Dict =copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1E-10 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple =_config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
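        # With every init range/scale forced to ~0, each trainable parameter should come out
        # exactly 0.0 or 1.0; anything else points at an init path that ignores the config.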
SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: List[Any] =_config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =self.default_image_processor
SCREAMING_SNAKE_CASE_: int =prepare_img()
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(**lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
"""simple docstring"""
def __magic_name__ ( lowercase ):
if upper_limit < 0:
raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
SCREAMING_SNAKE_CASE_: Tuple =[0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
SCREAMING_SNAKE_CASE_: Any =1
if upper_limit > 0:
SCREAMING_SNAKE_CASE_: List[str] =1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(lowercase ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
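# Quick sanity check: the first values are C(0)..C(5) = 1, 1, 2, 5, 14, 42, so
# catalan_numbers(5) should return [1, 1, 2, 5, 14, 42].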
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
_UpperCAmelCase = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_UpperCAmelCase = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = 'albert'
def __init__( self : Dict , lowerCAmelCase : List[str]=3_0000 , lowerCAmelCase : List[Any]=128 , lowerCAmelCase : List[str]=4096 , lowerCAmelCase : str=12 , lowerCAmelCase : str=1 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Dict=1_6384 , lowerCAmelCase : int=1 , lowerCAmelCase : str="gelu_new" , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : str=512 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : Union[str, Any]=1E-12 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : List[Any]="absolute" , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : int=2 , lowerCAmelCase : Optional[int]=3 , **lowerCAmelCase : int , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =embedding_size
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_size
SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE_: Any =num_hidden_groups
SCREAMING_SNAKE_CASE_: List[Any] =num_attention_heads
SCREAMING_SNAKE_CASE_: List[Any] =inner_group_num
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_act
SCREAMING_SNAKE_CASE_: int =intermediate_size
SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int =max_position_embeddings
SCREAMING_SNAKE_CASE_: Any =type_vocab_size
SCREAMING_SNAKE_CASE_: int =initializer_range
SCREAMING_SNAKE_CASE_: List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE_: Dict =classifier_dropout_prob
SCREAMING_SNAKE_CASE_: int =position_embedding_type
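        # ALBERT's cross-layer parameter sharing: `num_hidden_layers` applications are mapped
        # onto `num_hidden_groups` weight groups, each holding `inner_group_num` inner layers.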
class a ( UpperCAmelCase__ ):
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_: str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
_UpperCAmelCase = True
except ImportError:
_UpperCAmelCase = False
try:
from torch.hub import _get_torch_home
_UpperCAmelCase = _get_torch_home()
except ImportError:
_UpperCAmelCase = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
_UpperCAmelCase = os.path.join(torch_cache_home, """transformers""")
_UpperCAmelCase = """https://cdn.huggingface.co"""
_UpperCAmelCase = """https://s3.amazonaws.com/models.huggingface.co/bert"""
_UpperCAmelCase = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
_UpperCAmelCase = os.path.join(PATH, """config.yaml""")
_UpperCAmelCase = os.path.join(PATH, """attributes.txt""")
_UpperCAmelCase = os.path.join(PATH, """objects.txt""")
_UpperCAmelCase = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
_UpperCAmelCase = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
_UpperCAmelCase = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
_UpperCAmelCase = """pytorch_model.bin"""
_UpperCAmelCase = """config.yaml"""
def __magic_name__ ( lowercase=OBJECTS , lowercase=ATTRIBUTES ):
SCREAMING_SNAKE_CASE_: Optional[Any] =[]
with open(lowercase ) as f:
for object in f.readlines():
vg_classes.append(object.split(""",""" )[0].lower().strip() )
SCREAMING_SNAKE_CASE_: Optional[int] =[]
with open(lowercase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(""",""" )[0].lower().strip() )
return vg_classes, vg_attrs
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =OrderedDict()
with open(lowercase , """rb""" ) as f:
SCREAMING_SNAKE_CASE_: Any =pkl.load(lowercase )["""model"""]
for k in copy.deepcopy(list(ckp.keys() ) ):
SCREAMING_SNAKE_CASE_: Optional[int] =ckp.pop(lowercase )
if isinstance(lowercase , np.ndarray ):
SCREAMING_SNAKE_CASE_: List[Any] =torch.tensor(lowercase )
else:
            assert isinstance(lowercase , torch.Tensor ), type(lowercase )
SCREAMING_SNAKE_CASE_: str =v
return r
class a :
UpperCamelCase : Dict = {}
def __init__( self : Tuple , lowerCAmelCase : dict , lowerCAmelCase : str = "root" , lowerCAmelCase : Tuple=0 ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =name
SCREAMING_SNAKE_CASE_: Optional[Any] =level
SCREAMING_SNAKE_CASE_: Dict ={}
for k, v in dictionary.items():
if v is None:
raise ValueError()
SCREAMING_SNAKE_CASE_: List[Any] =copy.deepcopy(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =copy.deepcopy(lowerCAmelCase )
if isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[int] =Config(lowerCAmelCase , name=lowerCAmelCase , level=level + 1 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =v
setattr(self , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =d
def __repr__( self : Any ) -> int:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =val
SCREAMING_SNAKE_CASE_: str =val
SCREAMING_SNAKE_CASE_: Optional[int] =key.split(""".""" )
SCREAMING_SNAKE_CASE_: List[str] =len(lowerCAmelCase ) - 1
SCREAMING_SNAKE_CASE_: Tuple =self._pointer
if len(lowerCAmelCase ) > 1:
for i, l in enumerate(lowerCAmelCase ):
if hasattr(self , lowerCAmelCase ) and isinstance(getattr(self , lowerCAmelCase ) , lowerCAmelCase ):
setattr(getattr(self , lowerCAmelCase ) , """.""".join(levels[i:] ) , lowerCAmelCase )
if l == last_level:
SCREAMING_SNAKE_CASE_: Optional[Any] =val
else:
SCREAMING_SNAKE_CASE_: Tuple =pointer[l]
def lowerCamelCase__ ( self : Tuple ) -> Any:
'''simple docstring'''
return self._pointer
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ) -> List[Any]:
'''simple docstring'''
with open(f'''{file_name}''' , """w""" ) as stream:
dump(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
with open(f'''{file_name}''' , """w""" ) as stream:
json.dump(lowerCAmelCase , lowerCAmelCase )
@staticmethod
def lowerCamelCase__ ( lowerCAmelCase : Dict ) -> Union[str, Any]:
'''simple docstring'''
with open(lowerCAmelCase ) as stream:
SCREAMING_SNAKE_CASE_: Any =load(lowerCAmelCase , Loader=lowerCAmelCase )
return data
def __str__( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =""" """
if self._name != "root":
SCREAMING_SNAKE_CASE_: Dict =f'''{t * (self._level-1)}{self._name}:\n'''
else:
SCREAMING_SNAKE_CASE_: Dict =""""""
SCREAMING_SNAKE_CASE_: List[Any] =self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
r += f'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += f'''{t * (self._level)}{k}: {v} ({type(lowerCAmelCase ).__name__})\n'''
SCREAMING_SNAKE_CASE_: Any =level
return r[:-1]
@classmethod
def lowerCamelCase__ ( cls : List[Any] , lowerCAmelCase : str , **lowerCAmelCase : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =cls.get_config_dict(lowerCAmelCase , **lowerCAmelCase )
return cls(lowerCAmelCase )
@classmethod
def lowerCamelCase__ ( cls : Tuple , lowerCAmelCase : str , **lowerCAmelCase : Optional[int] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =kwargs.pop("""cache_dir""" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =kwargs.pop("""force_download""" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =kwargs.pop("""resume_download""" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =kwargs.pop("""proxies""" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =kwargs.pop("""local_files_only""" , lowerCAmelCase )
if os.path.isdir(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =os.path.join(lowerCAmelCase , lowerCAmelCase )
elif os.path.isfile(lowerCAmelCase ) or is_remote_url(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =pretrained_model_name_or_path
else:
SCREAMING_SNAKE_CASE_: List[str] =hf_bucket_url(lowerCAmelCase , filename=lowerCAmelCase , use_cdn=lowerCAmelCase )
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE_: str =cached_path(
lowerCAmelCase , cache_dir=lowerCAmelCase , force_download=lowerCAmelCase , proxies=lowerCAmelCase , resume_download=lowerCAmelCase , local_files_only=lowerCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
SCREAMING_SNAKE_CASE_: Tuple =Config.load_yaml(lowerCAmelCase )
except EnvironmentError:
SCREAMING_SNAKE_CASE_: Any ="""Can't load config for"""
raise EnvironmentError(lowerCAmelCase )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(lowerCAmelCase ), kwargs
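# Debug helper: compares an in-memory tensor against one dumped to disk and always raises,
# even on success, so a conversion script halts right after the check.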
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =torch.load("""dump.pt""" , map_location=in_tensor.device )
SCREAMING_SNAKE_CASE_: Optional[int] =in_tensor.numpy()
SCREAMING_SNAKE_CASE_: List[Any] =out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(lowercase , lowercase , rtol=0.01 , atol=0.1 ), (
        f'''{sum([1 for x in np.isclose(lowercase , lowercase , rtol=0.01 , atol=0.1 ).flatten() if not x] )/len(na.flatten() )*100:.4f} %'''
" element-wise mismatch"
)
raise Exception("""tensors are all good""" )
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"


def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading")
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print("%s not found in cache or force_download set to True, downloading to %s" % (url, temp_file.name))
            http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
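
# Design note on the caching protocol above: a "<cache_path>.lock" FileLock
# serialises concurrent downloads of the same URL, a "<cache_path>.incomplete"
# file lets interrupted downloads resume via the HTTP Range header in http_get,
# and the final os.replace() makes the finished cache entry appear atomically.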
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = shaaaa(url_bytes)  # shaaaa is this module's alias for hashlib.sha256
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = shaaaa(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
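
# Illustration of the naming scheme above (hypothetical hashes shown as <...>):
# url_to_filename("https://example.com/weights.h5", etag="abc") yields
# "<sha256(url)>.<sha256(etag)>.h5" - the URL hash, a dot, the ETag hash, plus the
# preserved ".h5" suffix so frameworks that sniff file extensions keep working.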
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def __magic_name__ ( lowercase , lowercase="," ):
assert isinstance(lowercase , lowercase )
if os.path.isfile(lowercase ):
with open(lowercase ) as f:
SCREAMING_SNAKE_CASE_: Dict =eval(f.read() )
else:
SCREAMING_SNAKE_CASE_: Dict =requests.get(lowercase )
try:
SCREAMING_SNAKE_CASE_: Optional[Any] =requests.json()
except Exception:
SCREAMING_SNAKE_CASE_: str =req.content.decode()
assert data is not None, "could not connect"
try:
SCREAMING_SNAKE_CASE_: Optional[Any] =eval(lowercase )
except Exception:
SCREAMING_SNAKE_CASE_: Optional[int] =data.split("""\n""" )
req.close()
return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            # BatchNorm checkpoints need a matching num_batches_tracked entry.
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    # PATH is assumed to be a module-level constant (the package directory),
    # defined at import time further up in this file.
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cva.imread(im)  # cva is this module's alias for cv2
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cva.cvtColor(img, cva.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
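
# Usage sketch for the batching helper above (hypothetical data):
# list(chunk([1, 2, 3, 4, 5], batch=2)) -> [[1, 2], [3, 4], [5]]
# The final slice is simply shorter when len(images) is not a multiple of batch.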
| 700
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata
@classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from its dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from a YAML string."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
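
    # Key-conversion sketch: a YAML block containing "train-eval-index: ..." is
    # loaded via from_yaml_string as {"train_eval_index": ...}, because that key is
    # listed in _FIELDS_WITH_DASHES; to_yaml_string applies the reverse mapping, and
    # every other key passes through verbatim in both directions.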
known_task_ids = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 36
| 0
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
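
# Sanity check with the closed forms: sum = n(n+1)/2 and sum of squares =
# n(n+1)(2n+1)/6. For n=10 the difference is 55**2 - 385 == 2640, and for the
# default n=100 it is 5050**2 - 338350 == 25164150.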
if __name__ == "__main__":
print(f"""{solution() = }""")
| 701
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split a sklearn Bunch (e.g. load_iris()) into (features, targets).
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset and split it into train / test sets
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 36
| 0
|
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, image_size=30, patch_size=2, num_channels=3,
        is_training=True, use_labels=True,
        hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, num_labels=3,
        scope=None, encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the PyTorch warning "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )
                    loss.backward()
@slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16(self):
        # A small test to make sure that inference works in half precision without any problem.
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 702
|
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein


def make_atom14_masks_np(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
    # Round-trip through torch so make_atom14_masks can be reused on numpy input.
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
| 36
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, bos_token_id=0, eos_token_id=2,
        max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50,
        position_embedding_type="absolute", use_cache=True, classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 703
|
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCAmelCase = ["""text""", """image""", """audio"""]
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =[]
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(lowercase , lowercase ):
inputs.append(create_inputs(lowercase ) )
else:
raise ValueError(f'''Invalid type requested: {input_type}''' )
return inputs
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =[]
for output in outputs:
if isinstance(lowercase , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(lowercase , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(lowercase , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f'''Invalid output: {output}''' )
return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 36
| 0
|
cache: dict = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
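
# Worked example: over a four-day period there are 43 valid "prize strings"
# (solution(4) == 43), matching the Project Euler 191 statement. Thanks to the
# cache there are only days * 2 * 3 distinct (days, absent, late) states in total,
# so the recursion runs in linear time in the number of days.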
if __name__ == "__main__":
print(solution())
| 704
|
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
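
# Worked examples: median_of_two_arrays([1, 3], [2]) merges to [1, 2, 3] and
# returns 2 (odd length, middle element); median_of_two_arrays([1, 2], [3, 4])
# merges to [1, 2, 3, 4] and returns (2 + 3) / 2 == 2.5 (even length).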
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 36
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
_UpperCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase = {
"""unc-nlp/lxmert-base-uncased""": 5_1_2,
}
_UpperCAmelCase = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None, tokenizer_file=None, do_lower_case=True,
        unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]",
        cls_token="[CLS]", mask_token="[MASK]",
        tokenize_chinese_chars=True, strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-create the backend normalizer if its saved state disagrees with the
        # requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 705
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, num_channels=3, is_training=True, use_labels=True,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        image_size=224, num_labels=1000,
        layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4,
            downsamples=[True, True, True, True], hidden_act="gelu", num_labels=self.num_labels,
            down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0,
            use_layer_scale=True, layer_scale_init_value=1e-5,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass


def prepare_img():
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =self.default_image_processor
SCREAMING_SNAKE_CASE_: int =prepare_img()
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(**lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
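# The shape checks above encode SwiftFormer's stage geometry: stage i has
# embed_dims[i // 2] channels and a spatial side of
# (image_size // 4) // 2 ** (i // 2), i.e. a 4x patch-embed reduction plus a
# 2x downsample every two blocks. A minimal sketch of that arithmetic
# (illustrative helper, not part of the model code):
def swiftformer_stage_side(image_size: int, stage: int) -> int:
    return (image_size // 4) // 2 ** (stage // 2)

assert swiftformer_stage_side(224, 0) == 56  # first stage: 224 / 4
assert swiftformer_stage_side(224, 7) == 7   # last stage: three more halvings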
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
if len(lowercase ) != len(lowercase ):
raise ValueError("""String lengths must match!""" )
SCREAMING_SNAKE_CASE_: List[Any] =0
    for char_a, char_b in zip(lowercase , lowercase ):
        if char_a != char_b:
            count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
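# A de-obfuscated sketch of the Hamming distance above, for reference
# (names are illustrative, not from the original source):
def hamming_distance_example(string_a: str, string_b: str) -> int:
    """
    >>> hamming_distance_example("karolin", "kathrin")
    3
    """
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    return sum(char_a != char_b for char_a, char_b in zip(string_a, string_b))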
"""simple docstring"""
from math import pi
def __magic_name__ ( lowercase , lowercase ):
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
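# Worked check of the formula above: a 90 degree angle on a circle of
# radius 10 subtends s = 2 * pi * r * (angle / 360) = 5 * pi, about 15.71.
# Illustrative one-liner, not from the original source:
from math import isclose, pi

assert isclose(2 * pi * 10 * (90 / 360), 5 * pi)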
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( lowercase , lowercase ):
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =set(lowercase ), [start]
while stack:
SCREAMING_SNAKE_CASE_: int =stack.pop()
        explored.add(v )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
                stack.append(adj )
return explored
_UpperCAmelCase = {
"""A""": ["""B""", """C""", """D"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F"""],
"""D""": ["""B""", """D"""],
"""E""": ["""B""", """F"""],
"""F""": ["""C""", """E""", """G"""],
"""G""": ["""F"""],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, """A"""))
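# Annotated sketch of the iterative DFS above: reversing each adjacency list
# makes the LIFO stack visit neighbors in their listed order. Illustrative
# names, not from the original source:
def dfs_order_example(graph: dict, start: str) -> list:
    order = []             # visit order, to make the traversal observable
    seen = {start}
    stack = [start]
    while stack:
        node = stack.pop()                 # LIFO: last pushed is visited first
        order.append(node)
        for neighbor in reversed(graph[node]):
            if neighbor not in seen:
                seen.add(neighbor)
                stack.append(neighbor)
    return order

assert dfs_order_example({"A": ["B", "C"], "B": [], "C": []}, "A") == ["A", "B", "C"]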
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Any =jax.device_count()
SCREAMING_SNAKE_CASE_: Dict =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Dict =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int ="""stabilityai/stable-diffusion-2"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase , subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxStableDiffusionPipeline.from_pretrained(
lowerCAmelCase , scheduler=lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Optional[int] =scheduler_params
SCREAMING_SNAKE_CASE_: Tuple ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.device_count()
SCREAMING_SNAKE_CASE_: Optional[Any] =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Any =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: str =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Any =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
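# Both tests above use the standard Flax data-parallel recipe: replicate()
# copies the pipeline parameters to every device, shard() adds a leading
# device axis to the batch, and jax.random.split() hands each device its own
# PRNG key. A minimal self-contained sketch of that pattern (illustrative,
# independent of the pipeline API; assumes jax is installed):
import jax
import jax.numpy as jnp

n_devices = jax.device_count()

@jax.pmap  # one program instance per device, mapped over the leading axis
def add_noise(batch, rng):
    return batch + jax.random.normal(rng, batch.shape)

batch = jnp.zeros((n_devices, 4, 8))  # leading axis must equal device count
rngs = jax.random.split(jax.random.PRNGKey(0), n_devices)
assert add_noise(batch, rngs).shape == (n_devices, 4, 8)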
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: int =""""""
for word_or_phrase in separated:
if not isinstance(lowercase , lowercase ):
raise Exception("""join() accepts only strings to be joined""" )
joined += word_or_phrase + separator
    return joined.strip(separator )
if __name__ == "__main__":
from doctest import testmod
testmod()
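# Caveat for the join above: strip(separator) also removes separator
# characters that genuinely start or end the joined text, which the built-in
# str.join never does. Illustrative comparison:
def join_via_strip(separator: str, words: list) -> str:
    joined = ""
    for word in words:
        joined += word + separator
    return joined.strip(separator)

assert join_via_strip("a", ["ab", "ba"]) == "bab"  # edge chars eaten
assert "a".join(["ab", "ba"]) == "ababa"           # expected result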
"""simple docstring"""
def __magic_name__ ( lowercase = 200_0000 ):
SCREAMING_SNAKE_CASE_: List[Any] =[0 for i in range(n + 1 )]
SCREAMING_SNAKE_CASE_: Union[str, Any] =1
SCREAMING_SNAKE_CASE_: Optional[Any] =1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
SCREAMING_SNAKE_CASE_: Optional[int] =1
SCREAMING_SNAKE_CASE_: Dict =0
for i in range(lowercase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def __magic_name__ ( lowercase , lowercase = True , lowercase = math.inf , lowercase = -math.inf , lowercase = math.inf , lowercase = -math.inf , lowercase = False , lowercase = 100 , lowercase = 0.01 , lowercase = 1 , ):
SCREAMING_SNAKE_CASE_: int =False
SCREAMING_SNAKE_CASE_: Optional[Any] =search_prob
SCREAMING_SNAKE_CASE_: Optional[Any] =start_temperate
SCREAMING_SNAKE_CASE_: List[str] =[]
SCREAMING_SNAKE_CASE_: str =0
SCREAMING_SNAKE_CASE_: Optional[int] =None
while not search_end:
SCREAMING_SNAKE_CASE_: int =current_state.score()
if best_state is None or current_score > best_state.score():
SCREAMING_SNAKE_CASE_: Optional[int] =current_state
        scores.append(current_score )
iterations += 1
SCREAMING_SNAKE_CASE_: int =None
SCREAMING_SNAKE_CASE_: int =current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
            SCREAMING_SNAKE_CASE_: Dict =random.randint(0 , len(neighbors ) - 1 ) # picking a random neighbor
            SCREAMING_SNAKE_CASE_: int =neighbors.pop(index )
SCREAMING_SNAKE_CASE_: Tuple =picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
SCREAMING_SNAKE_CASE_: Tuple =change * -1 # in case we are finding minimum
if change > 0: # improves the solution
SCREAMING_SNAKE_CASE_: Optional[int] =picked_neighbor
else:
SCREAMING_SNAKE_CASE_: str =(math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
SCREAMING_SNAKE_CASE_: Optional[int] =picked_neighbor
SCREAMING_SNAKE_CASE_: Union[str, Any] =current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
SCREAMING_SNAKE_CASE_: str =True
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] =next_state
if visualization:
from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
if __name__ == "__main__":
def __magic_name__ ( lowercase , lowercase ):
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
_UpperCAmelCase = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
_UpperCAmelCase = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
_UpperCAmelCase = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
_UpperCAmelCase = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def __magic_name__ ( lowercase , lowercase ):
return (3 * x**2) - (6 * y)
_UpperCAmelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_UpperCAmelCase = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
f"""{local_min.score()}"""
)
_UpperCAmelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_UpperCAmelCase = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
f"""{local_min.score()}"""
)
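# The heart of the loop above is the Metropolis-style acceptance rule: a
# worse neighbor (change < 0) is still taken with probability
# e ** (change / T), so exploration shrinks as the temperature T decays.
# Minimal sketch of that rule alone (illustrative names):
import math
import random

def accept_move(change: float, temperature: float) -> bool:
    if change > 0:        # strictly better moves are always accepted
        return True
    return random.random() < math.e ** (change / temperature)

assert accept_move(1.0, 0.01)        # improvement: always accepted
assert not accept_move(-50.0, 0.01)  # bad move at low T: e ** -5000 underflows to 0.0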
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_UpperCAmelCase = state_dict["""cls.predictions.decoder.weight"""]
_UpperCAmelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
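# The script above copies teacher layers [0, 2, 4, 7, 9, 11] into consecutive
# student slots 0..5 by rewriting state-dict keys. The same renaming idea on
# a toy state dict (layout and names are illustrative only):
teacher_sd = {f"encoder.layer.{i}.weight": i for i in range(12)}
student_sd = {}
for std_idx, teacher_idx in enumerate([0, 2, 4, 7, 9, 11]):
    student_sd[f"encoder.layer.{std_idx}.weight"] = teacher_sd[
        f"encoder.layer.{teacher_idx}.weight"
    ]
assert len(student_sd) == 6 and student_sd["encoder.layer.5.weight"] == 11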
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a :
def __init__( self : int , lowerCAmelCase : int , lowerCAmelCase : Any=2 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : List[Any]=4 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Union[str, Any]=7 , lowerCAmelCase : List[Any]=True , lowerCAmelCase : int=True , lowerCAmelCase : Any=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Optional[Any]=99 , lowerCAmelCase : int=36 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : int=37 , lowerCAmelCase : Union[str, Any]="gelu" , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : int=0.1 , lowerCAmelCase : Optional[int]=512 , lowerCAmelCase : int=16 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : List[str]=0.0_2 , lowerCAmelCase : Optional[int]=6 , lowerCAmelCase : str=6 , lowerCAmelCase : str=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : int=None , lowerCAmelCase : Optional[int]=1000 , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =parent
SCREAMING_SNAKE_CASE_: Dict =batch_size
SCREAMING_SNAKE_CASE_: Optional[Any] =num_channels
SCREAMING_SNAKE_CASE_: int =image_size
SCREAMING_SNAKE_CASE_: int =patch_size
SCREAMING_SNAKE_CASE_: Union[str, Any] =is_training
SCREAMING_SNAKE_CASE_: List[str] =use_input_mask
SCREAMING_SNAKE_CASE_: Tuple =use_token_type_ids
SCREAMING_SNAKE_CASE_: List[Any] =use_labels
SCREAMING_SNAKE_CASE_: List[Any] =vocab_size
SCREAMING_SNAKE_CASE_: Any =hidden_size
SCREAMING_SNAKE_CASE_: int =num_hidden_layers
SCREAMING_SNAKE_CASE_: Union[str, Any] =num_attention_heads
SCREAMING_SNAKE_CASE_: Dict =intermediate_size
SCREAMING_SNAKE_CASE_: Tuple =hidden_act
SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Optional[int] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[str] =max_position_embeddings
SCREAMING_SNAKE_CASE_: int =type_vocab_size
SCREAMING_SNAKE_CASE_: Union[str, Any] =type_sequence_label_size
SCREAMING_SNAKE_CASE_: Optional[Any] =initializer_range
SCREAMING_SNAKE_CASE_: Tuple =coordinate_size
SCREAMING_SNAKE_CASE_: int =shape_size
SCREAMING_SNAKE_CASE_: int =num_labels
SCREAMING_SNAKE_CASE_: Optional[int] =num_choices
SCREAMING_SNAKE_CASE_: Union[str, Any] =scope
SCREAMING_SNAKE_CASE_: List[str] =range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE_: Tuple =text_seq_length
SCREAMING_SNAKE_CASE_: Optional[Any] =(image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE_: List[str] =self.text_seq_length + self.image_seq_length
def lowerCamelCase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_: Any =ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
SCREAMING_SNAKE_CASE_: Optional[int] =bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE_: Dict =bbox[i, j, 3]
SCREAMING_SNAKE_CASE_: Dict =bbox[i, j, 1]
SCREAMING_SNAKE_CASE_: str =tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE_: List[Any] =bbox[i, j, 2]
SCREAMING_SNAKE_CASE_: Optional[int] =bbox[i, j, 0]
SCREAMING_SNAKE_CASE_: List[str] =tmp_coordinate
SCREAMING_SNAKE_CASE_: str =tf.constant(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: Tuple =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_: Optional[Any] =random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE_: Dict =None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_: str =ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_: str =None
SCREAMING_SNAKE_CASE_: Optional[int] =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: Dict =ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_: List[Any] =ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_: List[str] =LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =TFLayoutLMvaModel(config=lowerCAmelCase )
# text + image
SCREAMING_SNAKE_CASE_: int =model(lowerCAmelCase , pixel_values=lowerCAmelCase , training=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =model(
lowerCAmelCase , bbox=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , training=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: Optional[int] =model(lowerCAmelCase , bbox=lowerCAmelCase , pixel_values=lowerCAmelCase , training=lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE_: Tuple =model(lowerCAmelCase , training=lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE_: Union[str, Any] =model({"""pixel_values""": pixel_values} , training=lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Dict , lowerCAmelCase : Tuple ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =self.num_labels
SCREAMING_SNAKE_CASE_: Tuple =TFLayoutLMvaForSequenceClassification(config=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =model(
lowerCAmelCase , bbox=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , training=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.num_labels
SCREAMING_SNAKE_CASE_: Optional[int] =TFLayoutLMvaForTokenClassification(config=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =model(
lowerCAmelCase , bbox=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , training=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Tuple , lowerCAmelCase : Any , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =2
SCREAMING_SNAKE_CASE_: int =TFLayoutLMvaForQuestionAnswering(config=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =model(
lowerCAmelCase , bbox=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , training=lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =self.prepare_config_and_inputs()
(SCREAMING_SNAKE_CASE_): Optional[int] =config_and_inputs
SCREAMING_SNAKE_CASE_: int ={
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[Any] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase : List[Any] = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
UpperCamelCase : str = False
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : Dict = False
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any ) -> List[Any]:
'''simple docstring'''
return True
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int]=False ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =copy.deepcopy(lowerCAmelCase )
if model_class in get_values(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple ={
k: tf.tile(tf.expand_dims(lowerCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowerCAmelCase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[Any] =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
SCREAMING_SNAKE_CASE_: Dict =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple =tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =TFLayoutLMvaModelTester(self )
SCREAMING_SNAKE_CASE_: int =ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 )
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: str =model_class(lowerCAmelCase )
if getattr(lowerCAmelCase , """hf_compute_loss""" , lowerCAmelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
SCREAMING_SNAKE_CASE_: Optional[int] =self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase , return_labels=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCAmelCase )[0]
]
SCREAMING_SNAKE_CASE_: Any =added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
SCREAMING_SNAKE_CASE_: Union[str, Any] =self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase , return_labels=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =prepared_for_class.pop("""input_ids""" )
SCREAMING_SNAKE_CASE_: Tuple =model(lowerCAmelCase , **lowerCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
SCREAMING_SNAKE_CASE_: List[Any] =self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase , return_labels=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
SCREAMING_SNAKE_CASE_: str =prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
SCREAMING_SNAKE_CASE_: Dict =-100
SCREAMING_SNAKE_CASE_: Any =tf.convert_to_tensor(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =model(lowerCAmelCase , **lowerCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
SCREAMING_SNAKE_CASE_: str =self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase , return_labels=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =model(lowerCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
SCREAMING_SNAKE_CASE_: List[str] =self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase , return_labels=lowerCAmelCase )
# Get keys that were added with the _prepare_for_class function
SCREAMING_SNAKE_CASE_: List[Any] =prepared_for_class.keys() - inputs_dict.keys()
SCREAMING_SNAKE_CASE_: List[str] =inspect.signature(model.call ).parameters
SCREAMING_SNAKE_CASE_: Optional[Any] =list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
SCREAMING_SNAKE_CASE_: Any ={0: """input_ids"""}
for label_key in label_keys:
SCREAMING_SNAKE_CASE_: List[str] =signature_names.index(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =label_key
SCREAMING_SNAKE_CASE_: str =sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
SCREAMING_SNAKE_CASE_: str =[]
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
SCREAMING_SNAKE_CASE_: Any =prepared_for_class[value]
SCREAMING_SNAKE_CASE_: Dict =tuple(lowerCAmelCase )
# Send to model
SCREAMING_SNAKE_CASE_: Any =model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
(
SCREAMING_SNAKE_CASE_
): Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
(
SCREAMING_SNAKE_CASE_
): Tuple =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_: Optional[int] =type
self.model_tester.create_and_check_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : List[str] ) -> Any:
'''simple docstring'''
(
SCREAMING_SNAKE_CASE_
): str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
(
SCREAMING_SNAKE_CASE_
): Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
(
SCREAMING_SNAKE_CASE_
): Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
@slow
def lowerCamelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Dict =TFLayoutLMvaModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class a ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
SCREAMING_SNAKE_CASE_: str =self.default_image_processor
SCREAMING_SNAKE_CASE_: Dict =prepare_img()
SCREAMING_SNAKE_CASE_: List[str] =image_processor(images=lowerCAmelCase , return_tensors="""tf""" ).pixel_values
SCREAMING_SNAKE_CASE_: Union[str, Any] =tf.constant([[1, 2]] )
SCREAMING_SNAKE_CASE_: List[str] =tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
SCREAMING_SNAKE_CASE_: Tuple =model(input_ids=lowerCAmelCase , bbox=lowerCAmelCase , pixel_values=lowerCAmelCase , training=lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_: List[Any] =(1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =tf.constant(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase , atol=1E-4 ) )
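# Sequence-length bookkeeping used throughout the tests above: total length
# = text tokens + image patches + 1 CLS patch token. With the checkpoint's
# 224x224 input, 16x16 patches and the 2 text tokens fed in, that yields the
# (1, 199, 768) hidden-state shape asserted in the integration test:
def layoutlmv3_seq_len(text_tokens: int, image_size: int, patch_size: int) -> int:
    image_seq_len = (image_size // patch_size) ** 2 + 1  # patches + CLS
    return text_tokens + image_seq_len

assert layoutlmv3_seq_len(2, 224, 16) == 199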
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
    return int((input_a, input_b).count(0 ) == 0 )
def __magic_name__ ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
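# The tuple-count trick above returns 1 only when neither input is 0. Other
# gates compose the same way; an illustrative NAND built on the same idea:
def nand_gate(input_a: int, input_b: int) -> int:
    return int((input_a, input_b).count(0) != 0)

assert [nand_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [1, 1, 1, 0]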
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =len(lowercase )
# We need to create solution object to save path.
    SCREAMING_SNAKE_CASE_: Optional[Any] =[[0 for _ in range(size )] for _ in range(size )]
    SCREAMING_SNAKE_CASE_: List[str] =run_maze(lowercase , 0 , 0 , solutions )
    if solved:
        print("""\n""".join(str(row ) for row in solutions ) )
else:
print("""No solution exists!""" )
return solved
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Any =len(lowercase )
# Final check point.
if i == j == (size - 1):
SCREAMING_SNAKE_CASE_: Optional[int] =1
return True
SCREAMING_SNAKE_CASE_: int =(not i < 0) and (not j < 0) # Check lower bounds
SCREAMING_SNAKE_CASE_: Union[str, Any] =(i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
SCREAMING_SNAKE_CASE_: List[str] =(not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
SCREAMING_SNAKE_CASE_: str =1
# check for directions
if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
):
return True
SCREAMING_SNAKE_CASE_: str =0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
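# Compact, de-obfuscated sketch of the backtracking step above (0 = open,
# 1 = wall), trying moves in the same down/right/up/left order and un-marking
# a cell when every direction fails. Illustrative names:
def solve_maze_example(maze, i=0, j=0, path=None):
    n = len(maze)
    path = [[0] * n for _ in range(n)] if path is None else path
    if i == j == n - 1:                       # reached the bottom-right goal
        path[i][j] = 1
        return path
    if 0 <= i < n and 0 <= j < n and not path[i][j] and not maze[i][j]:
        path[i][j] = 1                        # tentatively take this cell
        for di, dj in ((1, 0), (0, 1), (-1, 0), (0, -1)):
            if solve_maze_example(maze, i + di, j + dj, path):
                return path
        path[i][j] = 0                        # dead end: backtrack
    return None

assert solve_maze_example([[0, 1], [0, 0]]) == [[1, 0], [1, 1]]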
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger("""transformers.models.speecht5""")
def __magic_name__ ( lowercase , lowercase , lowercase ):
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""input_conv.weight_g"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[str] =checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: Union[str, Any] =checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[Any] =checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""output_conv.1.weight_g"""]
SCREAMING_SNAKE_CASE_: List[str] =checkpoint["""output_conv.1.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , ):
if config_path is not None:
SCREAMING_SNAKE_CASE_: List[Any] =SpeechTaHifiGanConfig.from_pretrained(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE_: Union[str, Any] =SpeechTaHifiGan(lowercase )
SCREAMING_SNAKE_CASE_: Any =torch.load(lowercase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =np.load(lowercase )
SCREAMING_SNAKE_CASE_: Any =stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE_: str =stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
model.save_pretrained(lowercase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
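# The converter above wraps loading between apply_weight_norm() and
# remove_weight_norm(): the checkpoint stores each convolution as the
# weight-norm pair (weight_g, weight_v) with w = g * v / ||v||, and removal
# folds them back into a single .weight. Round-trip sketch on a bare Conv1d
# using the classic torch.nn.utils API (assumes torch is installed):
from torch import nn
from torch.nn.utils import remove_weight_norm, weight_norm

conv = weight_norm(nn.Conv1d(2, 2, 3))
assert hasattr(conv, "weight_g") and hasattr(conv, "weight_v")
remove_weight_norm(conv)          # recomputes and stores the plain .weight
assert not hasattr(conv, "weight_g") and conv.weight.shape == (2, 2, 3)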
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Optional[int] = 'codegen'
UpperCamelCase : List[str] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : List[Any] , lowerCAmelCase : Union[str, Any]=5_0400 , lowerCAmelCase : Tuple=2048 , lowerCAmelCase : Dict=2048 , lowerCAmelCase : List[Any]=4096 , lowerCAmelCase : str=28 , lowerCAmelCase : Union[str, Any]=16 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[str]="gelu_new" , lowerCAmelCase : int=0.0 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : int=0.0 , lowerCAmelCase : int=1E-5 , lowerCAmelCase : Dict=0.0_2 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[str]=5_0256 , lowerCAmelCase : Dict=5_0256 , lowerCAmelCase : int=False , **lowerCAmelCase : str , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =vocab_size
SCREAMING_SNAKE_CASE_: Any =n_ctx
SCREAMING_SNAKE_CASE_: str =n_positions
SCREAMING_SNAKE_CASE_: Any =n_embd
SCREAMING_SNAKE_CASE_: List[Any] =n_layer
SCREAMING_SNAKE_CASE_: Tuple =n_head
SCREAMING_SNAKE_CASE_: List[Any] =n_inner
SCREAMING_SNAKE_CASE_: List[Any] =rotary_dim
SCREAMING_SNAKE_CASE_: Tuple =activation_function
SCREAMING_SNAKE_CASE_: Any =resid_pdrop
SCREAMING_SNAKE_CASE_: List[str] =embd_pdrop
SCREAMING_SNAKE_CASE_: List[Any] =attn_pdrop
SCREAMING_SNAKE_CASE_: Optional[Any] =layer_norm_epsilon
SCREAMING_SNAKE_CASE_: Optional[Any] =initializer_range
SCREAMING_SNAKE_CASE_: Tuple =use_cache
SCREAMING_SNAKE_CASE_: Any =bos_token_id
SCREAMING_SNAKE_CASE_: str =eos_token_id
super().__init__(
bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , **lowerCAmelCase )
class a ( UpperCAmelCase__ ):
def __init__( self : Union[str, Any] , lowerCAmelCase : PretrainedConfig , lowerCAmelCase : str = "default" , lowerCAmelCase : List[PatchingSpec] = None , lowerCAmelCase : bool = False , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase , task=lowerCAmelCase , patching_specs=lowerCAmelCase , use_past=lowerCAmelCase )
if not getattr(self._config , """pad_token_id""" , lowerCAmelCase ):
# TODO: how to do that better?
SCREAMING_SNAKE_CASE_: Dict =0
@property
def lowerCamelCase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction="""inputs""" )
SCREAMING_SNAKE_CASE_: Any ={0: """batch""", 1: """past_sequence + sequence"""}
else:
SCREAMING_SNAKE_CASE_: Optional[Any] ={0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCamelCase__ ( self : int ) -> int:
'''simple docstring'''
return self._config.n_layer
@property
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return self._config.n_head
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =super(lowerCAmelCase , self ).generate_dummy_inputs(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
# We need to order the input in the way they appears in the forward()
SCREAMING_SNAKE_CASE_: int =OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
SCREAMING_SNAKE_CASE_: int =common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE_: Dict =seqlen + 2
SCREAMING_SNAKE_CASE_: List[str] =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
SCREAMING_SNAKE_CASE_: Any =[
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(self.num_layers )
]
SCREAMING_SNAKE_CASE_: Dict =common_inputs["""attention_mask"""]
if self.use_past:
SCREAMING_SNAKE_CASE_: int =ordered_inputs["""attention_mask"""].dtype
SCREAMING_SNAKE_CASE_: Any =torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 )
return ordered_inputs
@property
def lowerCamelCase__ ( self : Dict ) -> int:
'''simple docstring'''
return 13
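# The attribute_map above aliases generic config names onto CodeGen's
# GPT-style fields, so config.hidden_size transparently reads config.n_embd.
# Quick illustration (assumes transformers is installed):
from transformers import CodeGenConfig

config = CodeGenConfig(n_embd=1024, n_head=16)
assert config.hidden_size == 1024           # resolved via attribute_map
assert config.num_attention_heads == 16     # alias for n_head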
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __magic_name__ ( lowercase ):
if "cls_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def __magic_name__ ( lowercase , lowercase ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: Optional[int] =orig_state_dict.pop(lowercase )
if "qkv" in key:
SCREAMING_SNAKE_CASE_: Dict =key.split(""".""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =int(key_split[1] )
if "decoder_blocks" in key:
SCREAMING_SNAKE_CASE_: int =config.decoder_hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] ="""decoder.decoder_layers."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Dict =val[:dim, :]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: str =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: List[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: List[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Any =config.hidden_size
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""vit.encoder.layer."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim, :]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: Dict =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Any =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Tuple =val
return orig_state_dict
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =ViTMAEConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: List[Any] =1024
SCREAMING_SNAKE_CASE_: Dict =4096
SCREAMING_SNAKE_CASE_: Tuple =24
SCREAMING_SNAKE_CASE_: int =16
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Union[str, Any] =14
SCREAMING_SNAKE_CASE_: Any =1280
SCREAMING_SNAKE_CASE_: Dict =5120
SCREAMING_SNAKE_CASE_: Optional[int] =32
SCREAMING_SNAKE_CASE_: Optional[Any] =16
SCREAMING_SNAKE_CASE_: Tuple =ViTMAEForPreTraining(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )["""model"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: str =convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple ="""https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
SCREAMING_SNAKE_CASE_: List[Any] =Image.open(requests.get(lowercase , stream=lowercase ).raw )
SCREAMING_SNAKE_CASE_: int =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: int =image_processor(images=lowercase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_: Optional[Any] =model(**lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =outputs.logits
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Dict =torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
SCREAMING_SNAKE_CASE_: Any =torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
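# The timm-style checkpoint fuses Q, K and V into one qkv matrix of shape
# (3 * dim, dim); the loop above slices it into equal thirds. The same split
# in plain numpy (illustrative):
import numpy as np

dim = 4
qkv_weight = np.arange(3 * dim * dim).reshape(3 * dim, dim)
query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]
assert query.shape == key.shape == value.shape == (dim, dim)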
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =3
SCREAMING_SNAKE_CASE_: List[str] =250
SCREAMING_SNAKE_CASE_: Any =ids_tensor((batch_size, length) , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =torch.ones((batch_size, length) , device=lowerCAmelCase , dtype=torch.float ) / length
return input_ids, scores
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self._get_tensors(5 )
SCREAMING_SNAKE_CASE_: int =StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Dict =self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: int =self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =MaxLengthCriteria(max_length=10 )
SCREAMING_SNAKE_CASE_: Dict =self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Tuple =self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: int =self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Any =self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: str =self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: List[str] =StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def lowerCamelCase__ ( self : str ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self._get_tensors(5 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Optional[int] =MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCAmelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
SCREAMING_SNAKE_CASE_: Optional[int] =validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase ) , 1 )
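# A StoppingCriteria subclass only needs __call__(input_ids, scores) -> bool,
# and a StoppingCriteriaList fires when any member returns True. A minimal
# custom criterion in the same spirit as those tested above (assumes torch
# and transformers are installed):
import torch
from transformers.generation import StoppingCriteria

class StopOnTokenCriteria(StoppingCriteria):
    def __init__(self, stop_token_id: int):
        self.stop_token_id = stop_token_id

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        # stop once every sequence in the batch ends with the stop token
        return bool((input_ids[:, -1] == self.stop_token_id).all())

criteria = StopOnTokenCriteria(stop_token_id=2)
assert criteria(torch.tensor([[5, 2]]), None)
assert not criteria(torch.tensor([[5, 3]]), None)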
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
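# Minimal sketch of the _LazyModule pattern used above, written against the
# stdlib only as an assumption about its core mechanics: attribute access
# triggers the real import, so importing the package stays cheap until a
# symbol is actually needed.
import importlib
import types

class LazySubmoduleProxy(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        # resolve the defining submodule lazily, then forward the lookup
        module = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        return getattr(module, symbol)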
"""simple docstring"""
def __magic_name__ ( lowercase = 100 ):
SCREAMING_SNAKE_CASE_: str =set()
SCREAMING_SNAKE_CASE_: int =0
    SCREAMING_SNAKE_CASE_: Optional[Any] =lowercase + 1 # maximum limit
for a in range(2 , lowercase ):
for b in range(2 , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =a**b # calculates the current power
collect_powers.add(lowercase ) # adds the result to the set
return len(lowercase )
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
"""simple docstring"""
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =False
    while is_sorted is False: # keep looping until a full pass makes no swaps
SCREAMING_SNAKE_CASE_: Tuple =True
for i in range(0 , len(lowercase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE_: Tuple =False
for i in range(1 , len(lowercase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE_: str =False
return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase = [int(x) for x in input().split()]
    # inputting elements of the list in one line
_UpperCAmelCase = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
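# Clean, runnable sketch of the odd-even (brick) transposition sort above:
# alternate compare-exchange passes over even and odd index pairs until a
# full round makes no swap.
def odd_even_sort_clean(values: list) -> list:
    is_sorted = False
    while not is_sorted:
        is_sorted = True
        for start in (0, 1):  # even-index pass, then odd-index pass
            for i in range(start, len(values) - 1, 2):
                if values[i] > values[i + 1]:
                    values[i], values[i + 1] = values[i + 1], values[i]
                    is_sorted = False
    return values

assert odd_even_sort_clean([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]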
"""simple docstring"""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
set_seed(7_7_0)
_UpperCAmelCase = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
_UpperCAmelCase = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
_UpperCAmelCase = os.path.dirname(os.path.abspath(__file__))
_UpperCAmelCase = os.path.join(os.path.expanduser("""~"""), """.cache""")
_UpperCAmelCase = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""")
def __magic_name__ ( lowercase , lowercase=False ):
SCREAMING_SNAKE_CASE_: str =model_type
if use_small:
key += "_small"
return os.path.join(lowercase , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def __magic_name__ ( lowercase , lowercase ):
os.makedirs(lowercase , exist_ok=lowercase )
hf_hub_download(repo_id=lowercase , filename=lowercase , local_dir=lowercase )
def __magic_name__ ( lowercase , lowercase , lowercase=False , lowercase="text" ):
if model_type == "text":
SCREAMING_SNAKE_CASE_: Dict =BarkSemanticModel
SCREAMING_SNAKE_CASE_: Any =BarkSemanticConfig
SCREAMING_SNAKE_CASE_: Union[str, Any] =BarkSemanticGenerationConfig
elif model_type == "coarse":
SCREAMING_SNAKE_CASE_: str =BarkCoarseModel
SCREAMING_SNAKE_CASE_: Optional[Any] =BarkCoarseConfig
SCREAMING_SNAKE_CASE_: List[str] =BarkCoarseGenerationConfig
elif model_type == "fine":
SCREAMING_SNAKE_CASE_: int =BarkFineModel
SCREAMING_SNAKE_CASE_: Dict =BarkFineConfig
SCREAMING_SNAKE_CASE_: Dict =BarkFineGenerationConfig
else:
raise NotImplementedError()
SCREAMING_SNAKE_CASE_: Tuple =f'''{model_type}_small''' if use_small else model_type
SCREAMING_SNAKE_CASE_: Optional[Any] =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowercase ):
logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
SCREAMING_SNAKE_CASE_: Dict =torch.load(lowercase , map_location=lowercase )
# this is a hack
SCREAMING_SNAKE_CASE_: str =checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
SCREAMING_SNAKE_CASE_: Union[str, Any] =model_args["""vocab_size"""]
SCREAMING_SNAKE_CASE_: Optional[int] =model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
SCREAMING_SNAKE_CASE_: Optional[Any] =model_args.pop("""n_head""" )
SCREAMING_SNAKE_CASE_: Optional[int] =model_args.pop("""n_embd""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =model_args.pop("""n_layer""" )
SCREAMING_SNAKE_CASE_: str =ConfigClass(**checkpoint["""model_args"""] )
SCREAMING_SNAKE_CASE_: Any =ModelClass(config=lowercase )
SCREAMING_SNAKE_CASE_: str =GenerationConfigClass()
SCREAMING_SNAKE_CASE_: List[str] =model_generation_config
SCREAMING_SNAKE_CASE_: Any =checkpoint["""model"""]
# fixup checkpoint
SCREAMING_SNAKE_CASE_: Dict ="""_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(lowercase ):
# replace part of the key with corresponding layer name in HF implementation
SCREAMING_SNAKE_CASE_: List[Any] =k[len(lowercase ) :]
for old_layer_name in new_layer_name_dict:
SCREAMING_SNAKE_CASE_: str =new_k.replace(lowercase , new_layer_name_dict[old_layer_name] )
SCREAMING_SNAKE_CASE_: Optional[int] =state_dict.pop(lowercase )
SCREAMING_SNAKE_CASE_: Dict =set(state_dict.keys() ) - set(model.state_dict().keys() )
SCREAMING_SNAKE_CASE_: List[str] ={k for k in extra_keys if not k.endswith(""".attn.bias""" )}
SCREAMING_SNAKE_CASE_: str =set(model.state_dict().keys() ) - set(state_dict.keys() )
SCREAMING_SNAKE_CASE_: Tuple ={k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(lowercase ) != 0:
raise ValueError(f'''extra keys found: {extra_keys}''' )
if len(lowercase ) != 0:
raise ValueError(f'''missing keys: {missing_keys}''' )
model.load_state_dict(lowercase , strict=lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =model.num_parameters(exclude_embeddings=lowercase )
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""best_val_loss"""].item()
logger.info(f'''model loaded: {round(n_params/1e6 , 1 )}M params, {round(lowercase , 3 )} loss''' )
model.eval()
model.to(lowercase )
del checkpoint, state_dict
return model
def __magic_name__ ( lowercase , lowercase=False , lowercase="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
SCREAMING_SNAKE_CASE_: Optional[int] ="""cpu""" # do conversion on cpu
SCREAMING_SNAKE_CASE_: Union[str, Any] =_get_ckpt_path(lowercase , use_small=lowercase )
SCREAMING_SNAKE_CASE_: Dict =_load_model(lowercase , lowercase , model_type=lowercase , use_small=lowercase )
# load bark initial model
SCREAMING_SNAKE_CASE_: Union[str, Any] =_bark_load_model(lowercase , """cpu""" , model_type=lowercase , use_small=lowercase )
if model_type == "text":
SCREAMING_SNAKE_CASE_: Any =bark_model["""model"""]
if model.num_parameters(exclude_embeddings=lowercase ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
SCREAMING_SNAKE_CASE_: List[str] =5
SCREAMING_SNAKE_CASE_: Any =10
if model_type in ["text", "coarse"]:
SCREAMING_SNAKE_CASE_: Optional[int] =torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
SCREAMING_SNAKE_CASE_: Dict =bark_model(lowercase )[0]
SCREAMING_SNAKE_CASE_: Optional[Any] =model(lowercase )
# take last logits
SCREAMING_SNAKE_CASE_: List[str] =output_new_model_total.logits[:, [-1], :]
else:
SCREAMING_SNAKE_CASE_: str =3
SCREAMING_SNAKE_CASE_: Any =8
SCREAMING_SNAKE_CASE_: Tuple =torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
SCREAMING_SNAKE_CASE_: Optional[int] =model(lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =bark_model(lowercase , lowercase )
SCREAMING_SNAKE_CASE_: Optional[Any] =output_new_model_total.logits
    # any output difference should come from differences in the self-attention implementation
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(lowercase ).mkdir(exist_ok=lowercase )
model.save_pretrained(lowercase )
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =os.path.join(lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =BarkSemanticConfig.from_pretrained(os.path.join(lowercase , """config.json""" ) )
SCREAMING_SNAKE_CASE_: Optional[int] =BarkCoarseConfig.from_pretrained(os.path.join(lowercase , """config.json""" ) )
SCREAMING_SNAKE_CASE_: Tuple =BarkFineConfig.from_pretrained(os.path.join(lowercase , """config.json""" ) )
SCREAMING_SNAKE_CASE_: Optional[Any] =EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
SCREAMING_SNAKE_CASE_: int =BarkSemanticModel.from_pretrained(lowercase )
SCREAMING_SNAKE_CASE_: Any =BarkCoarseModel.from_pretrained(lowercase )
SCREAMING_SNAKE_CASE_: int =BarkFineModel.from_pretrained(lowercase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =BarkConfig.from_sub_model_configs(
lowercase , lowercase , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
SCREAMING_SNAKE_CASE_: List[Any] =BarkModel(lowercase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =semantic
SCREAMING_SNAKE_CASE_: int =coarseAcoustic
SCREAMING_SNAKE_CASE_: str =fineAcoustic
SCREAMING_SNAKE_CASE_: List[Any] =codec
SCREAMING_SNAKE_CASE_: str =bark_generation_config
Path(lowercase ).mkdir(exist_ok=lowercase )
bark.save_pretrained(lowercase , repo_id=lowercase , push_to_hub=lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
_UpperCAmelCase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
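# Sketch of the checkpoint fixup performed in _load_model above: strip the
# torch.compile "_orig_mod." prefix, then translate the GPT-style layer
# names via the new_layer_name_dict mapping defined at the top of the file.
def remap_state_dict(state_dict: dict, layer_name_map: dict, prefix: str = "_orig_mod.") -> dict:
    for key in list(state_dict):
        if key.startswith(prefix):
            new_key = key[len(prefix):]
            for old_name, new_name in layer_name_map.items():
                new_key = new_key.replace(old_name, new_name)
            state_dict[new_key] = state_dict.pop(key)
    return state_dict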
"""simple docstring"""
def __magic_name__ ( lowercase ):
return str(lowercase ) == str(lowercase )[::-1]
def __magic_name__ ( lowercase ):
return int(lowercase ) + int(str(lowercase )[::-1] )
def __magic_name__ ( lowercase = 1_0000 ):
SCREAMING_SNAKE_CASE_: List[str] =[]
for num in range(1 , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =0
SCREAMING_SNAKE_CASE_: int =num
while iterations < 50:
SCREAMING_SNAKE_CASE_: Optional[Any] =sum_reverse(lowercase )
iterations += 1
if is_palindrome(lowercase ):
break
else:
lychrel_nums.append(lowercase )
return len(lowercase )
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =DPTConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: int =1024
SCREAMING_SNAKE_CASE_: Dict =4096
SCREAMING_SNAKE_CASE_: Optional[int] =24
SCREAMING_SNAKE_CASE_: Optional[Any] =16
SCREAMING_SNAKE_CASE_: Dict =[5, 11, 17, 23]
SCREAMING_SNAKE_CASE_: Union[str, Any] =[256, 512, 1024, 1024]
SCREAMING_SNAKE_CASE_: Union[str, Any] =(1, 384, 384)
if "ade" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Tuple =True
SCREAMING_SNAKE_CASE_: int =150
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""huggingface/label-files"""
SCREAMING_SNAKE_CASE_: List[str] ="""ade20k-id2label.json"""
SCREAMING_SNAKE_CASE_: Tuple =json.load(open(cached_download(hf_hub_url(lowercase , lowercase , repo_type="""dataset""" ) ) , """r""" ) )
SCREAMING_SNAKE_CASE_: int ={int(lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_: Optional[Any] =idalabel
SCREAMING_SNAKE_CASE_: Dict ={v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_: Dict =[1, 150, 480, 480]
return config, expected_shape
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase , lowercase )
def __magic_name__ ( lowercase ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
SCREAMING_SNAKE_CASE_: Dict =name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""patch_embed""" , """patch_embeddings""" )
if "pos_embed" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""proj""" , """projection""" )
if "blocks" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
SCREAMING_SNAKE_CASE_: int =int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""bn""" , """batch_norm""" )
if "head" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""auxlayer""" , """auxiliary_head.head""" )
return name
def __magic_name__ ( lowercase , lowercase ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE_: Any =state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE_: int =state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_: Optional[int] =in_proj_weight[: config.hidden_size, :]
SCREAMING_SNAKE_CASE_: Optional[Any] =in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE_: Union[str, Any] =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE_: List[str] =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE_: Any =in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE_: Tuple =in_proj_bias[-config.hidden_size :]
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: str ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
SCREAMING_SNAKE_CASE_: Optional[int] =Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Any =get_dpt_config(lowercase )
# load original state_dict from URL
SCREAMING_SNAKE_CASE_: Dict =torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(lowercase )
# rename keys
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: Optional[int] =state_dict.pop(lowercase )
SCREAMING_SNAKE_CASE_: Any =val
# read in qkv matrices
read_in_q_k_v(lowercase , lowercase )
# load HuggingFace model
SCREAMING_SNAKE_CASE_: List[Any] =DPTForSemanticSegmentation(lowercase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(lowercase )
model.load_state_dict(lowercase )
model.eval()
# Check outputs on an image
SCREAMING_SNAKE_CASE_: Dict =480 if """ade""" in checkpoint_url else 384
SCREAMING_SNAKE_CASE_: List[str] =DPTImageProcessor(size=lowercase )
SCREAMING_SNAKE_CASE_: int =prepare_img()
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processor(lowercase , return_tensors="""pt""" )
# forward pass
SCREAMING_SNAKE_CASE_: List[str] =model(**lowercase ).logits if """ade""" in checkpoint_url else model(**lowercase ).predicted_depth
# Assert logits
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] )
if "ade" in checkpoint_url:
SCREAMING_SNAKE_CASE_: int =torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] )
assert outputs.shape == torch.Size(lowercase )
assert (
torch.allclose(outputs[0, 0, :3, :3] , lowercase , atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , lowercase )
)
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(lowercase , lowercase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowercase , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowercase , lowercase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowercase , )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
_UpperCAmelCase = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
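import torch

# Sketch of the fused-qkv split performed in read_in_q_k_v above: timm
# checkpoints store query/key/value as one (3 * hidden, hidden) matrix,
# while the HF model keeps three separate projections.
hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
value_w = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([query_w, key_w, value_w]), in_proj_weight)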
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_UpperCAmelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""DPTFeatureExtractor"""]
_UpperCAmelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ , UpperCAmelCase__ ):
UpperCamelCase : Optional[int] = 'convnextv2'
def __init__( self : str , lowerCAmelCase : Optional[Any]=3 , lowerCAmelCase : Optional[int]=4 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[Any]="gelu" , lowerCAmelCase : Optional[Any]=0.0_2 , lowerCAmelCase : Tuple=1E-12 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : List[Any]=224 , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Union[str, Any]=None , **lowerCAmelCase : Dict , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =num_channels
SCREAMING_SNAKE_CASE_: Optional[Any] =patch_size
SCREAMING_SNAKE_CASE_: str =num_stages
SCREAMING_SNAKE_CASE_: List[Any] =[96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
SCREAMING_SNAKE_CASE_: str =[3, 3, 9, 3] if depths is None else depths
SCREAMING_SNAKE_CASE_: int =hidden_act
SCREAMING_SNAKE_CASE_: int =initializer_range
SCREAMING_SNAKE_CASE_: str =layer_norm_eps
SCREAMING_SNAKE_CASE_: Optional[int] =drop_path_rate
SCREAMING_SNAKE_CASE_: Dict =image_size
SCREAMING_SNAKE_CASE_: List[str] =["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
SCREAMING_SNAKE_CASE_: str =get_aligned_output_features_output_indices(
out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names )
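# Small sketch of the stage bookkeeping done in the constructor above:
# stage names are derived from the depths list, and out_features/out_indices
# are then aligned against those names for backbone use.
depths = [3, 3, 9, 3]
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
assert stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]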
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class a :
def __init__( self : str ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: list[Any] =[]
SCREAMING_SNAKE_CASE_: int =0
SCREAMING_SNAKE_CASE_: int =0
def lowerCamelCase__ ( self : Optional[Any] ) -> bool:
'''simple docstring'''
return self.head == self.tail
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
self.data.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =self.tail + 1
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.data[self.head]
SCREAMING_SNAKE_CASE_: Optional[int] =self.head + 1
return ret
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.tail - self.head
def lowerCamelCase__ ( self : str ) -> None:
'''simple docstring'''
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =data
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: int =1
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return self.data
def lowerCamelCase__ ( self : List[Any] ) -> MyNode | None:
'''simple docstring'''
return self.left
def lowerCamelCase__ ( self : Dict ) -> MyNode | None:
'''simple docstring'''
return self.right
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
return self.height
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =data
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =node
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =node
def lowerCamelCase__ ( self : int , lowerCAmelCase : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =height
def __magic_name__ ( lowercase ):
if node is None:
return 0
return node.get_height()
def __magic_name__ ( lowercase , lowercase ):
if a > b:
return a
return b
def __magic_name__ ( lowercase ):
print("""left rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: int =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
print("""right rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowercase ) )
return right_rotation(lowercase )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowercase ) )
return left_rotation(lowercase )
def __magic_name__ ( lowercase , lowercase ):
if node is None:
return MyNode(lowercase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowercase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
        ): # an imbalance detected
SCREAMING_SNAKE_CASE_: Union[str, Any] =node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
SCREAMING_SNAKE_CASE_: Any =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: List[Any] =lr_rotation(lowercase )
else:
node.set_right(insert_node(node.get_right() , lowercase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
SCREAMING_SNAKE_CASE_: Tuple =node.get_right()
assert right_child is not None
if data < right_child.get_data():
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[int] =left_rotation(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
return node
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: Dict =root.get_right()
if right_child is None:
break
SCREAMING_SNAKE_CASE_: str =right_child
return root.get_data()
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: str =root.get_left()
if left_child is None:
break
SCREAMING_SNAKE_CASE_: Dict =left_child
return root.get_data()
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: str =root.get_left()
SCREAMING_SNAKE_CASE_: List[Any] =root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_left_most(lowercase )
root.set_data(lowercase )
root.set_right(del_node(lowercase , lowercase ) )
elif left_child is not None:
SCREAMING_SNAKE_CASE_: Optional[int] =left_child
elif right_child is not None:
SCREAMING_SNAKE_CASE_: Any =right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(lowercase , lowercase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowercase , lowercase ) )
if get_height(lowercase ) - get_height(lowercase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
SCREAMING_SNAKE_CASE_: Tuple =left_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
elif get_height(lowercase ) - get_height(lowercase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
SCREAMING_SNAKE_CASE_: Optional[Any] =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: str =lr_rotation(lowercase )
SCREAMING_SNAKE_CASE_: str =my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowercase )
return root
class a :
def __init__( self : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: MyNode | None =None
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
return get_height(self.root )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""insert:""" + str(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Tuple =insert_node(self.root , lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""delete:""" + str(lowerCAmelCase ) )
if self.root is None:
print("""Tree is empty!""" )
return
SCREAMING_SNAKE_CASE_: Union[str, Any] =del_node(self.root , lowerCAmelCase )
    def __str__( self : List[str] , ) -> str: # a level-order traversal gives a more intuitive look at the tree
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =""""""
SCREAMING_SNAKE_CASE_: str =MyQueue()
q.push(self.root )
SCREAMING_SNAKE_CASE_: List[str] =self.get_height()
if layer == 0:
return output
SCREAMING_SNAKE_CASE_: int =0
while not q.is_empty():
SCREAMING_SNAKE_CASE_: int =q.pop()
SCREAMING_SNAKE_CASE_: List[Any] =""" """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(lowerCAmelCase )
q.push(lowerCAmelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
SCREAMING_SNAKE_CASE_: List[Any] =cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , lowerCAmelCase ) - 1:
SCREAMING_SNAKE_CASE_: int =layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def __magic_name__ ( ):
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
_UpperCAmelCase = AVLtree()
_UpperCAmelCase = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
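# Quick sketch of the invariant the insert/delete paths above maintain,
# reusing the get_height helper referenced throughout this file: the left
# and right subtree heights of every node differ by at most one.
def is_balanced(node) -> bool:
    if node is None:
        return True
    if abs(get_height(node.get_left()) - get_height(node.get_right())) > 1:
        return False
    return is_balanced(node.get_left()) and is_balanced(node.get_right())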
"""simple docstring"""
def __magic_name__ ( lowercase = 400_0000 ):
SCREAMING_SNAKE_CASE_: Optional[Any] =[0, 1]
SCREAMING_SNAKE_CASE_: Optional[int] =0
    while fib[i] <= lowercase:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > lowercase:
break
i += 1
SCREAMING_SNAKE_CASE_: Union[str, Any] =0
for j in range(len(lowercase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import string
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =""""""
for i in sequence:
SCREAMING_SNAKE_CASE_: List[Any] =ord(lowercase )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =string.ascii_letters
SCREAMING_SNAKE_CASE_: Tuple =string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowercase )] if c in letters else c for c in sequence )
def __magic_name__ ( ):
from timeit import timeit
print("""Running performance benchmarks...""" )
SCREAMING_SNAKE_CASE_: int ="""from string import printable ; from __main__ import atbash, atbash_slow"""
print(f'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=lowercase )} seconds''' )
print(f'''> atbash(): {timeit("atbash(printable)" , setup=lowercase )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
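import string

# Alternative sketch (not in the original file): precompute the Atbash
# substitution as a translation table, which avoids per-character index
# lookups and is typically the fastest pure-Python variant.
_ATBASH_TABLE = str.maketrans(
    string.ascii_letters,
    string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1],
)

def atbash_table(sequence: str) -> str:
    return sequence.translate(_ATBASH_TABLE)

assert atbash_table("ABCDEFGH") == "ZYXWVUTS"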
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase__ )
class a ( UpperCAmelCase__ ):
def __init__( self : Dict , **lowerCAmelCase : int ) -> int:
'''simple docstring'''
super().__init__(**lowerCAmelCase )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
self.check_model_type(lowerCAmelCase )
def lowerCamelCase__ ( self : Any , **lowerCAmelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] ={}
SCREAMING_SNAKE_CASE_: Union[str, Any] ={}
SCREAMING_SNAKE_CASE_: Union[str, Any] ={}
# preprocess args
if "points_per_batch" in kwargs:
SCREAMING_SNAKE_CASE_: Tuple =kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
SCREAMING_SNAKE_CASE_: Optional[Any] =kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
SCREAMING_SNAKE_CASE_: Union[str, Any] =kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
SCREAMING_SNAKE_CASE_: Optional[Any] =kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
SCREAMING_SNAKE_CASE_: Any =kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
SCREAMING_SNAKE_CASE_: List[Any] =kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
SCREAMING_SNAKE_CASE_: Tuple =kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
SCREAMING_SNAKE_CASE_: List[str] =kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
SCREAMING_SNAKE_CASE_: Optional[Any] =kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
SCREAMING_SNAKE_CASE_: List[str] =kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
SCREAMING_SNAKE_CASE_: Dict =kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
SCREAMING_SNAKE_CASE_: Union[str, Any] =kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : List[Any] , lowerCAmelCase : Union[str, Any] , *lowerCAmelCase : int , lowerCAmelCase : Any=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : Optional[int] ) -> str:
'''simple docstring'''
return super().__call__(lowerCAmelCase , *lowerCAmelCase , num_workers=lowerCAmelCase , batch_size=lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : int = 0 , lowerCAmelCase : float = 512 / 1500 , lowerCAmelCase : Optional[int] = 32 , lowerCAmelCase : Optional[int] = 1 , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =load_image(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =self.image_processor.size["""longest_edge"""]
SCREAMING_SNAKE_CASE_: int =self.image_processor.generate_crop_boxes(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =self.image_processor(images=lowerCAmelCase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
SCREAMING_SNAKE_CASE_: List[Any] =self.get_inference_context()
with inference_context():
SCREAMING_SNAKE_CASE_: Union[str, Any] =self._ensure_tensor_on_device(lowerCAmelCase , device=self.device )
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
SCREAMING_SNAKE_CASE_: List[Any] =image_embeddings
SCREAMING_SNAKE_CASE_: Any =grid_points.shape[1]
SCREAMING_SNAKE_CASE_: List[str] =points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =grid_points[:, i : i + points_per_batch, :, :]
SCREAMING_SNAKE_CASE_: Dict =input_labels[:, i : i + points_per_batch]
SCREAMING_SNAKE_CASE_: Dict =i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def lowerCamelCase__ ( self : int , lowerCAmelCase : int , lowerCAmelCase : List[str]=0.8_8 , lowerCAmelCase : List[str]=0.9_5 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Dict=1 , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =model_inputs.pop("""input_boxes""" )
SCREAMING_SNAKE_CASE_: List[str] =model_inputs.pop("""is_last""" )
SCREAMING_SNAKE_CASE_: Tuple =model_inputs.pop("""original_sizes""" ).tolist()
SCREAMING_SNAKE_CASE_: Tuple =model_inputs.pop("""reshaped_input_sizes""" ).tolist()
SCREAMING_SNAKE_CASE_: List[str] =self.model(**lowerCAmelCase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
SCREAMING_SNAKE_CASE_: Tuple =model_outputs["""pred_masks"""]
SCREAMING_SNAKE_CASE_: str =self.image_processor.post_process_masks(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , binarize=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =model_outputs["""iou_scores"""]
SCREAMING_SNAKE_CASE_: Dict =self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : str=0.7 , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =[]
SCREAMING_SNAKE_CASE_: List[Any] =[]
SCREAMING_SNAKE_CASE_: List[str] =[]
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
SCREAMING_SNAKE_CASE_: List[str] =torch.cat(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =torch.cat(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =self.image_processor.post_process_for_mask_generation(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =defaultdict(lowerCAmelCase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] ={}
if output_rle_mask:
SCREAMING_SNAKE_CASE_: Dict =rle_mask
if output_bboxes_mask:
SCREAMING_SNAKE_CASE_: Union[str, Any] =bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : List[str]=2 , lowerCAmelCase : int=3 , lowerCAmelCase : Optional[Any]=64 , lowerCAmelCase : Union[str, Any]=None ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =np.random.default_rng(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =length
SCREAMING_SNAKE_CASE_: Union[str, Any] =rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_: Tuple =a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[Any] ) -> str:
'''simple docstring'''
return self.length
def __getitem__( self : Union[str, Any] , lowerCAmelCase : Any ) -> List[str]:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class a ( torch.nn.Module ):
def __init__( self : Optional[int] , lowerCAmelCase : str=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Optional[int]=False ) -> Tuple:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: Dict =True
def lowerCamelCase__ ( self : str , lowerCAmelCase : Tuple=None ) -> int:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Union[str, Any] =False
return x * self.a[0] + self.b[0]
class a ( torch.nn.Module ):
def __init__( self : Union[str, Any] , lowerCAmelCase : Any=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : List[Any]=False ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[str] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: List[Any] =True
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : int=None ) -> Any:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Optional[int] =False
return x * self.a + self.b
def __magic_name__ ( lowercase , lowercase = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE_: Optional[int] ={"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
SCREAMING_SNAKE_CASE_: Any =load_dataset("""csv""" , data_files=lowercase )
SCREAMING_SNAKE_CASE_: Any =datasets["""train"""].unique("""label""" )
SCREAMING_SNAKE_CASE_: List[Any] ={v: i for i, v in enumerate(lowercase )}
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: Dict =tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase , max_length=lowercase , padding="""max_length""" )
if "label" in examples:
SCREAMING_SNAKE_CASE_: Optional[int] =[label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_: List[Any] =datasets.map(
lowercase , batched=lowercase , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowercase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Optional[int] =DataLoader(tokenized_datasets["""train"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=2 )
SCREAMING_SNAKE_CASE_: Dict =DataLoader(tokenized_datasets["""validation"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=1 )
return train_dataloader, eval_dataloader
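# Hedged wiring sketch for the helpers above. The upstream (un-obfuscated)
# class names RegressionDataset and RegressionModel are assumptions here,
# since the obfuscated file names both classes `a`.
dataset = RegressionDataset(length=96, seed=42)
loader = DataLoader(dataset, batch_size=16)
model = RegressionModel(a=2, b=3)
batch = next(iter(loader))
predictions = model(batch["x"])  # broadcasts y = a * x + b over the batch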
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
_UpperCAmelCase = logging.getLogger(__name__)
_UpperCAmelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
_UpperCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a :
UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={
'help': (
                'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
)
} , )
UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(UpperCAmelCase__ )} , )
UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
UpperCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class a :
UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
UpperCamelCase : Optional[str] = field(default=UpperCAmelCase__ , metadata={'help': 'The input training data file (a text file).'} )
UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , )
UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , )
UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
UpperCamelCase : Optional[int] = field(
default=5 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
UpperCamelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated. Default to the max input length of the model.'
)
} , )
UpperCamelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
UpperCamelCase : float = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if self.train_file is not None:
SCREAMING_SNAKE_CASE_: Any =self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
SCREAMING_SNAKE_CASE_: List[str] =self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def __magic_name__ ( lowercase , lowercase ):
with open(lowercase , """r""" , encoding="""utf-8""" ) as f:
SCREAMING_SNAKE_CASE_: int =[json.loads(lowercase ) for line in f.read().splitlines() if (len(lowercase ) > 0 and not line.isspace())]
assert len(lowercase ) == len(lowercase )
SCREAMING_SNAKE_CASE_: Union[str, Any] ={c: dataset[c] for c in dataset.column_names}
SCREAMING_SNAKE_CASE_: List[str] =refs
return Dataset.from_dict(lowercase )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.config_overrides is not None:
        logger.info(f"Overriding config: {model_args.config_overrides}")
        config.update_from_string(model_args.config_overrides)
        logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
"""simple docstring"""
def catalan_numbers(upper_limit: int) -> "list[int]":
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
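

# A minimal sanity check (added for illustration, not part of the original
# algorithm): the n-th Catalan number also has the closed form C(2n, n) / (n + 1),
# so catalan_numbers(n)[-1] should equal _catalan_closed_form(n) for every n >= 0.
def _catalan_closed_form(n: int) -> int:
    from math import comb

    return comb(2 * n, n) // (n + 1)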
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
            N = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
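

# Illustrative note (added commentary, not in the original module): attribute_map
# lets callers use the common HF names on top of the GPT-2 style fields above,
# e.g. reading DecisionTransformerConfig().num_attention_heads transparently
# resolves to the n_head attribute.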
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
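

# Added note (not in the original module): the dynamic axes above tell the ONNX
# exporter which input dimensions may vary at run time, so a single exported
# graph accepts arbitrary batch sizes and sequence lengths (plus a choice axis
# for the multiple-choice task).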
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
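

# Illustrative helper (added for clarity, not in the original file): the property
# above multiplies the convolutional strides together, giving the overall
# raw-sample to feature-frame downsampling factor of the feature encoder.
def _inputs_to_logits_ratio_example() -> int:
    ratio = 1
    for stride in (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1):  # default SEW strides
        ratio *= stride
    return ratio  # 320: one output frame per 320 input audio samples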
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
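

# Worked example (added commentary): for a README whose lines are
#   ---
#   language: en
#   ---
#   # My dataset
# the helper returns ("language: en", "# My dataset"): the YAML front matter
# between the two "---" fences is split off from the markdown body.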
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="""closed""" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset dict into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)

    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
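

# Added note (not in the original script): normalize="true" normalizes each row
# of the confusion matrix by the number of true samples in that class, so every
# cell reads as the fraction of a class predicted as each label rather than raw
# counts.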
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
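

# Toy illustration (added for clarity, not part of the conversion script): timm
# stores query, key and value as one fused (3 * hidden, hidden) projection;
# slicing it into equal thirds, as done above, recovers the three separate
# matrices that the Hugging Face checkpoint layout expects.
def _split_qkv_example(hidden_size: int = 4):
    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    q = qkv[:hidden_size]
    k = qkv[hidden_size : 2 * hidden_size]
    v = qkv[-hidden_size:]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)
    return q, k, v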
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
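

# Background note (added commentary, not in the original module): "atom14" is a
# compact per-residue layout with at most 14 atom slots, while "atom37" indexes
# the union of all heavy-atom names across residue types. The gather indices
# built above let positions be moved between the two layouts with a single
# index/gather along the atom dimension, and the *_atom_exists masks mark which
# slots correspond to real atoms for each residue type.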
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos_no_config_name(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCAmelCase = ["""text""", """image""", """audio"""]
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =[]
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(lowercase , lowercase ):
inputs.append(create_inputs(lowercase ) )
else:
raise ValueError(f'''Invalid type requested: {input_type}''' )
return inputs
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =[]
for output in outputs:
if isinstance(lowercase , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(lowercase , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(lowercase , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f'''Invalid output: {output}''' )
return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
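

# Added note (not in the original file): this is the standard dummy-object
# pattern used when an optional backend is missing. Any attempt to instantiate
# the class, or to call from_config / from_pretrained, raises an ImportError via
# requires_backends explaining that the "note_seq" package must be installed.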
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list, nums2: list) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
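

# Quick sanity checks (added for illustration, not in the original script):
def _median_examples() -> None:
    assert median_of_two_arrays([1, 3], [2]) == 2  # odd total length -> middle element
    assert median_of_two_arrays([1, 2], [3, 4]) == 2.5  # even total length -> mean of the middle pair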
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import transformers dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[int] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCamelCase : Tuple = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Any = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Dict = False
UpperCamelCase : List[str] = False
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: int =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Any =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Tuple =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Optional[Any] =SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_: Optional[Any] =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: List[str] =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Dict =outputs.hidden_states
SCREAMING_SNAKE_CASE_: List[Any] =8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: Any =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
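# Concretely, with the tester defaults image_size=224 and
# embed_dims=[48, 56, 112, 220], the 8 hidden states checked above come in
# pairs per stage: (batch, 48, 56, 56) twice, then (batch, 56, 28, 28),
# (batch, 112, 14, 14) and finally (batch, 220, 7, 7), matching
# (image_size // 4) // 2 ** (i // 2).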
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
def _config_zero_init(lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_: Dict =copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1E-10 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple =_config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: List[Any] =_config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =self.default_image_processor
SCREAMING_SNAKE_CASE_: int =prepare_img()
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(**lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 36
| 0
|
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def __magic_name__ ( ):
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def __magic_name__ ( ):
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def __magic_name__ ( ):
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(lowercase ):
http_head("""https://huggingface.co""" )
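# The three tests above exercise the three OfflineSimulationMode variants:
# CONNECTION_TIMES_OUT patches requests so an un-timed request raises
# RequestWouldHangIndefinitelyError while an explicit timeout surfaces as
# ConnectTimeout; CONNECTION_FAILS makes every request raise ConnectionError
# immediately; HF_DATASETS_OFFLINE_SET_TO_1 flips the library's offline flag
# so helpers such as http_head refuse to touch the network at all.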
| 706
|
"""simple docstring"""
from math import pi
def __magic_name__ ( lowercase , lowercase ):
return 2 * pi * radius * (angle / 360)
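# A worked check of the formula above (assuming the original signature was
# arc_length(angle, radius), which matches the call in the main guard below):
# a 90-degree arc of a circle of radius 10 is a quarter of the circumference
# 2 * pi * 10 ~= 62.83, i.e. ~= 15.71.
def _arc_length_example() -> float:
    angle, radius = 90, 10
    return 2 * pi * radius * (angle / 360)  # ~= 15.707963267948966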
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
| 36
| 0
|
"""simple docstring"""
from __future__ import annotations
from math import gcd
def __magic_name__ ( lowercase , lowercase = 2 , lowercase = 1 , lowercase = 3 , ):
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(lowercase , lowercase , lowercase ) -> int:
return (pow(lowercase , 2 ) + step) % modulus
for _ in range(lowercase ):
# These track the position within the cycle detection logic.
SCREAMING_SNAKE_CASE_: Any =seed
SCREAMING_SNAKE_CASE_: Dict =seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
SCREAMING_SNAKE_CASE_: Optional[Any] =rand_fn(lowercase , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: Any =rand_fn(lowercase , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: Optional[Any] =rand_fn(lowercase , lowercase , lowercase )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
SCREAMING_SNAKE_CASE_: Optional[Any] =gcd(hare - tortoise , lowercase )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
SCREAMING_SNAKE_CASE_: Dict =hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
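# A small usage sketch of the routine above, assuming its original name was
# pollard_rho as the argparse block below suggests. 8051 = 83 * 97, so any
# divisor returned must be one of those two factors; a prime input yields None.
def _pollard_rho_demo() -> None:
    divisor = pollard_rho(8051)  # with the default seed/step this finds a factor
    assert divisor is not None and 8051 % divisor == 0
    assert pollard_rho(13) is None  # 13 is prime, so every attempt fails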
if __name__ == "__main__":
import argparse
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""num""",
type=int,
help="""The value to find a divisor of""",
)
parser.add_argument(
"""--attempts""",
type=int,
default=3,
help="""The number of attempts before giving up""",
)
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
_UpperCAmelCase = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 707
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Any =jax.device_count()
SCREAMING_SNAKE_CASE_: Dict =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Dict =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int ="""stabilityai/stable-diffusion-2"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase , subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxStableDiffusionPipeline.from_pretrained(
lowerCAmelCase , scheduler=lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Optional[int] =scheduler_params
SCREAMING_SNAKE_CASE_: Tuple ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.device_count()
SCREAMING_SNAKE_CASE_: Optional[Any] =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Any =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: str =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Any =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
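# Pattern note for both tests above: `replicate` copies the pipeline params to
# every device, `shard` splits the prompt batch along a new leading device
# axis, and `jax.random.split(rng, jax.device_count())` hands each device its
# own PRNG key, which is why the images come back with shape
# (jax.device_count(), 1, 768, 768, 3).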
| 36
| 0
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a ( yaml.SafeLoader ):
def lowerCamelCase__ ( self : int , lowerCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =[self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_: Any =[tuple(lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else key for key in keys]
SCREAMING_SNAKE_CASE_: Dict =Counter(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =[key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=False ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =super().construct_mapping(lowerCAmelCase , deep=lowerCAmelCase )
self._check_no_duplicates_on_constructed_node(lowerCAmelCase )
return mapping
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
SCREAMING_SNAKE_CASE_: Union[str, Any] =full_content[1:].index("""---""" ) + 1
SCREAMING_SNAKE_CASE_: List[str] ="""\n""".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowercase )
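# A quick illustration of the splitter above, using the name
# _split_yaml_from_readme under which the class below calls it (the input
# string is illustrative):
def _front_matter_demo() -> None:
    readme = "---\nlicense: apache-2.0\n---\n# My dataset\nHello."
    yaml_block, body = _split_yaml_from_readme(readme)
    assert yaml_block == "license: apache-2.0"
    assert body == "# My dataset\nHello."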
class a ( UpperCAmelCase__ ):
# class attributes
UpperCamelCase : Tuple = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def lowerCamelCase__ ( cls : List[Any] , lowerCAmelCase : Path ) -> "DatasetMetadata":
'''simple docstring'''
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_: Union[str, Any] =_split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(lowerCAmelCase )
else:
return cls()
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Path ) -> List[str]:
'''simple docstring'''
if path.exists():
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_: str =readme_file.read()
else:
SCREAMING_SNAKE_CASE_: str =None
SCREAMING_SNAKE_CASE_: Tuple =self._to_readme(lowerCAmelCase )
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as readme_file:
readme_file.write(lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Optional[str] = None ) -> str:
'''simple docstring'''
if readme_content is not None:
SCREAMING_SNAKE_CASE_: List[str] =_split_yaml_from_readme(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] ="""---\n""" + self.to_yaml_string() + """---\n""" + content
else:
SCREAMING_SNAKE_CASE_: List[Any] ="""---\n""" + self.to_yaml_string() + """---\n"""
return full_content
@classmethod
def lowerCamelCase__ ( cls : Optional[int] , lowerCAmelCase : str ) -> "DatasetMetadata":
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =yaml.load(lowerCAmelCase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_: List[Any] ={
(key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**lowerCAmelCase )
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=lowerCAmelCase , allow_unicode=lowerCAmelCase , encoding="""utf-8""" , ).decode("""utf-8""" )
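# Round-trip illustration for the two mappings above: "train_eval_index" (the
# only entry in _FIELDS_WITH_DASHES) is read from the YAML key
# "train-eval-index" on load and written back with dashes by to_yaml_string;
# every other key passes through unchanged.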
_UpperCAmelCase = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_UpperCAmelCase = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
_UpperCAmelCase = ap.parse_args()
_UpperCAmelCase = Path(args.readme_filepath)
_UpperCAmelCase = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 708
|
"""simple docstring"""
def __magic_name__ ( lowercase = 200_0000 ):
SCREAMING_SNAKE_CASE_: List[Any] =[0 for i in range(n + 1 )]
SCREAMING_SNAKE_CASE_: Union[str, Any] =1
SCREAMING_SNAKE_CASE_: Optional[Any] =1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , i ):
SCREAMING_SNAKE_CASE_: Optional[int] =1
SCREAMING_SNAKE_CASE_: Dict =0
for i in range(lowercase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
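# A de-obfuscated sketch of the sieve above (illustrative names): mark every
# composite index, then sum the indices left unmarked, i.e. the primes below n.
def _sum_primes_below(n: int) -> int:
    is_composite = [False] * (n + 1)
    total = 0
    for i in range(2, n):
        if not is_composite[i]:
            total += i
            for j in range(i * i, n + 1, i):
                is_composite[j] = True
    return total
# _sum_primes_below(10) == 2 + 3 + 5 + 7 == 17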
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 0
|
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =len(lowercase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =len(lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =[[False for _ in range(m + 1 )] for _ in range(n + 1 )]
SCREAMING_SNAKE_CASE_: List[Any] =True
for i in range(lowercase ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
SCREAMING_SNAKE_CASE_: Optional[Any] =True
if a[i].islower():
SCREAMING_SNAKE_CASE_: str =True
return dp[n][m]
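# The table above is the classic "abbreviation" DP: dp[i][j] is True when the
# first i characters of `a` can become the first j characters of `b` by
# upper-casing some lowercase letters of `a` and deleting the remaining
# lowercase ones. The demo below assumes the original name was abbr (my
# guess, not confirmed by the snippet):
def _abbr_demo() -> None:
    assert abbr("daBcd", "ABC") is True  # upper-case "a" and "c", drop the "d"s
    assert abbr("dBcd", "ABC") is False  # no "a" is available to become "A"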
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_UpperCAmelCase = state_dict["""cls.predictions.decoder.weight"""]
_UpperCAmelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
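# Net effect of the loop above: teacher layers [0, 2, 4, 7, 9, 11] land in
# student layers [0, 1, 2, 3, 4, 5] (std_idx counts up once per teacher
# layer), so the dumped checkpoint seeds a 6-layer student from the 12-layer
# BERT teacher, embeddings and MLM head included.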
| 36
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
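# The block above defers heavy imports: at runtime the module is swapped for a
# _LazyModule that resolves names from _import_structure on first attribute
# access, while the TYPE_CHECKING branch keeps static analyzers happy. A
# minimal sketch of the same idea (simplified, not the real _LazyModule):
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {submodule: [names]} into {name: submodule}
        self._origin = {v: k for k, vs in import_structure.items() for v in vs}

    def __getattr__(self, attr: str):
        if attr not in self._origin:
            raise AttributeError(attr)
        submodule = importlib.import_module(f"{self.__name__}.{self._origin[attr]}")
        return getattr(submodule, attr)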
| 710
|
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
return int((input_a, input_a).count(0 ) == 0 )
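# The one-liner above leans on tuple.count: the AND gate outputs 1 exactly
# when neither input is 0. The same trick generalizes; an OR gate, for
# instance, would be int((input_1, input_2).count(1) > 0) (illustrative, not
# part of the original).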
def __magic_name__ ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 36
| 0
|
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class a ( UpperCAmelCase__ , UpperCAmelCase__ ):
@register_to_config
def __init__( self : Dict , *, lowerCAmelCase : int = 4 , lowerCAmelCase : int = 768 , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , ) -> Any:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: str =nn.Parameter(torch.zeros(lowerCAmelCase ) )
# parameters for additional clip time embeddings
SCREAMING_SNAKE_CASE_: List[str] =nn.Linear(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =nn.Linear(lowerCAmelCase , lowerCAmelCase )
# parameters for encoder hidden states
SCREAMING_SNAKE_CASE_: Tuple =clip_extra_context_tokens
SCREAMING_SNAKE_CASE_: Union[str, Any] =nn.Linear(
lowerCAmelCase , self.clip_extra_context_tokens * cross_attention_dim )
SCREAMING_SNAKE_CASE_: List[str] =nn.Linear(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =nn.LayerNorm(lowerCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] , *, lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
SCREAMING_SNAKE_CASE_: Optional[int] =image_embeddings.shape[0]
SCREAMING_SNAKE_CASE_: Any =self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
SCREAMING_SNAKE_CASE_: Optional[Any] =classifier_free_guidance_embeddings.expand(
lowerCAmelCase , -1 )
SCREAMING_SNAKE_CASE_: List[Any] =torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
SCREAMING_SNAKE_CASE_: Optional[Any] =prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
SCREAMING_SNAKE_CASE_: Dict =self.embedding_proj(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.clip_image_embeddings_project_to_time_embeddings(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
SCREAMING_SNAKE_CASE_: Optional[Any] =self.clip_extra_context_tokens_proj(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =clip_extra_context_tokens.reshape(lowerCAmelCase , -1 , self.clip_extra_context_tokens )
SCREAMING_SNAKE_CASE_: str =clip_extra_context_tokens.permute(0 , 2 , 1 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.encoder_hidden_states_proj(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =self.text_encoder_hidden_states_norm(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
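# Shape walk-through of the forward pass above, with batch B, embedding dim D
# and N = clip_extra_context_tokens: the (B, D) image embedding is projected
# once into the timestep-embedding space (and summed with the projected
# prompt embedding) and once into (B, N * cross_attention_dim), which is
# reshaped to (B, cross_attention_dim, N) and permuted to
# (B, N, cross_attention_dim) so that N extra tokens can be prepended to the
# projected, layer-normed text encoder hidden states along the sequence axis.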
| 711
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger("""transformers.models.speecht5""")
def __magic_name__ ( lowercase , lowercase , lowercase ):
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""input_conv.weight_g"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[str] =checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: Union[str, Any] =checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[Any] =checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""output_conv.1.weight_g"""]
SCREAMING_SNAKE_CASE_: List[str] =checkpoint["""output_conv.1.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
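# Background for the "_g"/"_v" suffixes above: torch.nn.utils.weight_norm
# splits a conv's `weight` into a magnitude tensor `weight_g` and a direction
# tensor `weight_v`, which is why apply_weight_norm() is called before the
# copy and remove_weight_norm() afterwards. Standalone illustration:
#
#     conv = torch.nn.utils.weight_norm(torch.nn.Conv1d(4, 8, 3))
#     sorted(name for name, _ in conv.named_parameters())
#     # -> ['bias', 'weight_g', 'weight_v']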
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , ):
if config_path is not None:
SCREAMING_SNAKE_CASE_: List[Any] =SpeechTaHifiGanConfig.from_pretrained(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE_: Union[str, Any] =SpeechTaHifiGan(lowercase )
SCREAMING_SNAKE_CASE_: Any =torch.load(lowercase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =np.load(lowercase )
SCREAMING_SNAKE_CASE_: Any =stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE_: str =stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
model.save_pretrained(lowercase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 36
| 0
|
"""simple docstring"""
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
_UpperCAmelCase = logging.getLogger()
_UpperCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class a ( UpperCAmelCase__ ):
def lowerCamelCase__ ( self : str , lowerCAmelCase : List[str] ) -> Dict:
'''simple docstring'''
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] ={"""source""": """What is love ?""", """target""": """life"""}
SCREAMING_SNAKE_CASE_: List[Any] ={"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
SCREAMING_SNAKE_CASE_: Optional[Any] ="""\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(lowerCAmelCase , f'''{split}.{field}''' ) , """w""" ) as f:
f.write(lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : str = "pytorch" ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_: Optional[int] =os.path.join(lowerCAmelCase , """output""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =os.path.join(lowerCAmelCase , """data""" )
self._create_dummy_data(data_dir=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
SCREAMING_SNAKE_CASE_: Optional[int] =[sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowerCAmelCase , env=self.get_env() )
SCREAMING_SNAKE_CASE_: List[Any] =os.path.join(lowerCAmelCase , """metrics.json""" )
with open(lowerCAmelCase ) as f:
SCREAMING_SNAKE_CASE_: Any =json.load(lowerCAmelCase )
return result
@require_torch_gpu
def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def lowerCamelCase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowerCamelCase__ ( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 712
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __magic_name__ ( lowercase ):
if "cls_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
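# Two illustrative inputs for the rename chain above (key strings follow the
# original MAE checkpoint layout):
#     "blocks.0.attn.proj.weight"   -> "vit.encoder.layer.0.attention.output.dense.weight"
#     "decoder_blocks.1.norm1.bias" -> "decoder.decoder_layers.1.layernorm_before.bias"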
def __magic_name__ ( lowercase , lowercase ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: Optional[int] =orig_state_dict.pop(lowercase )
if "qkv" in key:
SCREAMING_SNAKE_CASE_: Dict =key.split(""".""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =int(key_split[1] )
if "decoder_blocks" in key:
SCREAMING_SNAKE_CASE_: int =config.decoder_hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] ="""decoder.decoder_layers."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Dict =val[:dim, :]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: str =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: List[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: List[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Any =config.hidden_size
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""vit.encoder.layer."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim, :]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: Dict =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Any =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Tuple =val
return orig_state_dict
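# Shape intuition for the qkv branch above: timm-style checkpoints fuse the
# query/key/value projections into one (3 * dim, dim) matrix, and slicing rows
# [:dim], [dim : 2 * dim] and [-dim:] recovers the three separate projections
# (the same slicing applies to the fused bias). Self-contained check:
def _split_qkv_demo() -> None:
    dim = 4
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)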
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =ViTMAEConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: List[Any] =1024
SCREAMING_SNAKE_CASE_: Dict =4096
SCREAMING_SNAKE_CASE_: Tuple =24
SCREAMING_SNAKE_CASE_: int =16
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Union[str, Any] =14
SCREAMING_SNAKE_CASE_: Any =1280
SCREAMING_SNAKE_CASE_: Dict =5120
SCREAMING_SNAKE_CASE_: Optional[int] =32
SCREAMING_SNAKE_CASE_: Optional[Any] =16
SCREAMING_SNAKE_CASE_: Tuple =ViTMAEForPreTraining(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )["""model"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: str =convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple ="""https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
SCREAMING_SNAKE_CASE_: List[Any] =Image.open(requests.get(lowercase , stream=lowercase ).raw )
SCREAMING_SNAKE_CASE_: int =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: int =image_processor(images=lowercase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_: Optional[Any] =model(**lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =outputs.logits
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Dict =torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
SCREAMING_SNAKE_CASE_: Any =torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 36
| 0
|
"""simple docstring"""
import numpy as np
def __magic_name__ ( lowercase ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
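# Why the expression above is tanh: tanh(x) = (e**x - e**-x) / (e**x + e**-x)
# = (1 - e**(-2x)) / (1 + e**(-2x)) = 2 / (1 + e**(-2x)) - 1. Sanity check
# against NumPy's own implementation:
def _tanh_identity_check() -> None:
    x = np.linspace(-3, 3, 7)
    assert np.allclose((2 / (1 + np.exp(-2 * x))) - 1, np.tanh(x))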
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36
| 0
|
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
_UpperCAmelCase = logging.get_logger(__name__)
def __magic_name__ ( ):
# Get the sagemaker specific mp parameters from smp_options variable.
SCREAMING_SNAKE_CASE_: List[str] =os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
SCREAMING_SNAKE_CASE_: List[Any] =json.loads(lowercase )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
SCREAMING_SNAKE_CASE_: str =os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
SCREAMING_SNAKE_CASE_: Union[str, Any] =json.loads(lowercase )
if not mpi_options.get("""sagemaker_mpi_enabled""" , lowercase ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
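# Illustrative (made-up) environment values that would make the check above
# return True, provided the `smdistributed` package is importable:
#     SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}'
#     SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'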
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class a ( UpperCAmelCase__ ):
UpperCamelCase : str = field(
default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , lowerCAmelCase , )
@cached_property
def lowerCamelCase__ ( self : str ) -> "torch.device":
'''simple docstring'''
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
SCREAMING_SNAKE_CASE_: str =torch.device("""cpu""" )
SCREAMING_SNAKE_CASE_: List[str] =0
elif is_sagemaker_model_parallel_available():
SCREAMING_SNAKE_CASE_: Optional[int] =smp.local_rank()
SCREAMING_SNAKE_CASE_: Tuple =torch.device("""cuda""" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
SCREAMING_SNAKE_CASE_: str =int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.device("""cuda""" , self.local_rank )
SCREAMING_SNAKE_CASE_: Union[str, Any] =1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
SCREAMING_SNAKE_CASE_: Any =torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
SCREAMING_SNAKE_CASE_: Dict =torch.device("""cuda""" , self.local_rank )
SCREAMING_SNAKE_CASE_: Tuple =1
if device.type == "cuda":
torch.cuda.set_device(lowerCAmelCase )
return device
@property
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
return not is_sagemaker_model_parallel_available()
@property
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return False
| 714
|
"""simple docstring"""
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =False
while is_sorted is False: # Until all the indices are traversed keep looping
SCREAMING_SNAKE_CASE_: Tuple =True
for i in range(0 , len(lowercase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE_: Tuple =False
for i in range(1 , len(lowercase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE_: str =False
return input_list
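# The two passes above implement odd-even ("brick") sort: compare-exchange all
# (even, even + 1) pairs, then all (odd, odd + 1) pairs, and repeat until a
# full double pass makes no swap. Usage, with the name the main guard below
# uses:
#     odd_even_sort([5, 3, 1, 4, 2])  # -> [1, 2, 3, 4, 5]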
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase = [int(x) for x in input().split()]
# inputing elements of the list in one line
_UpperCAmelCase = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
| 36
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class DeiTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PIL.Image.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Resize to the exact (height, width) given in `size`.
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
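# Minimal usage sketch (hedged): "DeiTImageProcessor" above is a name
# reconstructed from an obfuscated dump, and the demo assumes Pillow is installed.
if __name__ == "__main__":
    processor = DeiTImageProcessor()
    dummy = PIL.Image.fromarray(np.zeros((300, 300, 3), dtype=np.uint8))
    batch = processor.preprocess(dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # -> (1, 3, 224, 224): resize, then center crop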
| 715
|
"""simple docstring"""
def is_palindrome(n) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    """Count the Lychrel candidates below `limit` (Project Euler 55): numbers that
    do not produce a palindrome within 50 reverse-and-add iterations."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 0
|
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class Image:
    """Image feature: reads image data from a path, raw bytes, a numpy array, or a PIL image."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image to bytes, using the image's native format when Pillow can both open and save it."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format_ = image.format
    else:
        format_ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format_)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(objs) -> list:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
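# Minimal round-trip sketch (hedged; assumes Pillow is installed): encode a
# uint8 array to compressed image bytes, then decode it back to a PIL image.
if __name__ == "__main__":
    feature = Image()
    arr = np.zeros((8, 8, 3), dtype=np.uint8)
    encoded = feature.encode_example(arr)  # -> {"path": None, "bytes": b"..."}
    img = feature.decode_example(encoded)
    print(img.size, img.mode)  # -> (8, 8) RGB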
| 716
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_UpperCAmelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""DPTFeatureExtractor"""]
_UpperCAmelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
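# Usage sketch (hedged): with the lazy module in place, the heavy submodules are
# only imported on first attribute access, e.g.:
#     from transformers import DPTConfig  # resolves through _LazyModule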
| 36
| 0
|
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(b85encoded: bytes) -> str:
    return base64.b85decode(b85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
| 717
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b
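# Illustration (hedged): a right rotation promotes the left child B of A;
# a left rotation is the mirror image.
#
#         A                B
#        / \              / \
#       B   C    -->    Bl   A
#      / \                  / \
#     Bl  Br              Br   C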
def right_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    new_height = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(new_height)
    new_root_height = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(new_root_height)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    new_height = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(new_height)
    new_root_height = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(new_root_height)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    new_height = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(new_height)
    return node
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    new_height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(new_height)
    return root
class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)
    def __str__(self) -> str:  # a level traversal, gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 36
| 0
|
"""simple docstring"""
_lowerCAmelCase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowerCAmelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowerCAmelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 718
|
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
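    # Involution check (hedged example): applying atbash twice is the identity,
    # e.g. atbash("ABCDEFGH") == "ZYXWVUTS" and atbash("ZYXWVUTS") == "ABCDEFGH".
    assert all(atbash(atbash(example)) == example for example in ("ABCDEFGH", "123GGjj"))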
| 36
| 0
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None) -> List["torch.Tensor"]:
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
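# Usage sketch (hedged): turning semantic-segmentation logits into per-image maps.
# `image` and `outputs` would come from a MobileViT segmentation model elsewhere.
#     processor = MobileViTImageProcessor()
#     inputs = processor.preprocess(image, return_tensors="pt")
#     maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 512)])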
| 719
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
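# Usage sketch (hedged): pair the mocked dataloaders with an Accelerator so the
# TPU padding branch in `collate_fn` can trigger.
#     from accelerate import Accelerator
#     accelerator = Accelerator()
#     train_dl, eval_dl = mocked_dataloaders(accelerator, batch_size=16)
#     model, train_dl, eval_dl = accelerator.prepare(RegressionModel(), train_dl, eval_dl)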
| 36
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2
        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)
        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]
        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()
        out_latents_inputs = pipe(**inputs)[0]
        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))
        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
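# Running these suites (hedged sketch; the exact test path may differ in your checkout):
#     pytest -k "InstructPix2Pix" tests/pipelines/                 # fast CPU tests
#     RUN_SLOW=1 pytest -k "InstructPix2Pix" tests/pipelines/      # @slow GPU tests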
| 720
|
"""simple docstring"""
def catalan_numbers(upper_limit: int) -> "list[int]":
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
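# Worked example (quick check of the recurrence above):
# C(2) = C(0)C(1) + C(1)C(0) = 2, C(3) = 5, C(4) = 14, C(5) = 42.
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]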
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
_UpperCAmelCase = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
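# A quick cross-check of the DP table above against the closed form
# C(n) = C(2n, n) / (n + 1); added here as an illustrative self-test.
if __name__ == "__main__":
    from math import comb

    for n, value in enumerate(catalan_numbers(10)):
        assert value == comb(2 * n, n) // (n + 1)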
| 36
| 0
|
"""simple docstring"""
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list holding the initial element count of each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge the sets containing src and dst; returns False if they are
        already in the same set. Uses union by rank.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """
        Find the root of the set containing disj_set, compressing the path
        along the way.
        """
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
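# A minimal usage sketch: three singleton sets merged into one set of size 3.
if __name__ == "__main__":
    disjoint_set = DisjointSet([1, 1, 1])
    disjoint_set.merge(0, 1)
    disjoint_set.merge(1, 2)
    assert disjoint_set.get_parent(0) == disjoint_set.get_parent(2)
    assert disjoint_set.max_set == 3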
| 721
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_UpperCAmelCase = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = 'albert'
def __init__( self : Dict , lowerCAmelCase : List[str]=3_0000 , lowerCAmelCase : List[Any]=128 , lowerCAmelCase : List[str]=4096 , lowerCAmelCase : str=12 , lowerCAmelCase : str=1 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Dict=1_6384 , lowerCAmelCase : int=1 , lowerCAmelCase : str="gelu_new" , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : str=512 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : Union[str, Any]=1E-12 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : List[Any]="absolute" , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : int=2 , lowerCAmelCase : Optional[int]=3 , **lowerCAmelCase : int , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =embedding_size
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_size
SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE_: Any =num_hidden_groups
SCREAMING_SNAKE_CASE_: List[Any] =num_attention_heads
SCREAMING_SNAKE_CASE_: List[Any] =inner_group_num
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_act
SCREAMING_SNAKE_CASE_: int =intermediate_size
SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int =max_position_embeddings
SCREAMING_SNAKE_CASE_: Any =type_vocab_size
SCREAMING_SNAKE_CASE_: int =initializer_range
SCREAMING_SNAKE_CASE_: List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE_: Dict =classifier_dropout_prob
SCREAMING_SNAKE_CASE_: int =position_embedding_type
class a ( UpperCAmelCase__ ):
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_: str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 36
| 0
|
"""simple docstring"""
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        cursor_info = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        cursor_info = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    # Hide the cursor for the duration of the with-block, restoring it even
    # if the body raises.
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
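# A minimal usage sketch: keep the terminal cursor hidden while rendering.
if __name__ == "__main__":
    import time

    with hide():
        time.sleep(1)  # cursor is hidden here
    # cursor is visible again at this point, even if the body had raised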
| 700
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a ( yaml.SafeLoader ):
def lowerCamelCase__ ( self : int , lowerCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =[self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_: Any =[tuple(lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else key for key in keys]
SCREAMING_SNAKE_CASE_: Dict =Counter(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =[key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=False ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =super().construct_mapping(lowerCAmelCase , deep=lowerCAmelCase )
self._check_no_duplicates_on_constructed_node(lowerCAmelCase )
return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class a ( UpperCAmelCase__ ):
# class attributes
UpperCamelCase : Tuple = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def lowerCamelCase__ ( cls : List[Any] , lowerCAmelCase : Path ) -> "DatasetMetadata":
'''simple docstring'''
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =_split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(lowerCAmelCase )
else:
return cls()
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Path ) -> List[str]:
'''simple docstring'''
if path.exists():
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_: str =readme_file.read()
else:
SCREAMING_SNAKE_CASE_: str =None
SCREAMING_SNAKE_CASE_: Tuple =self._to_readme(lowerCAmelCase )
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as readme_file:
readme_file.write(lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Optional[str] = None ) -> str:
'''simple docstring'''
if readme_content is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =_split_yaml_from_readme(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] ="""---\n""" + self.to_yaml_string() + """---\n""" + content
else:
SCREAMING_SNAKE_CASE_: List[Any] ="""---\n""" + self.to_yaml_string() + """---\n"""
return full_content
@classmethod
def lowerCamelCase__ ( cls : Optional[int] , lowerCAmelCase : str ) -> "DatasetMetadata":
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =yaml.load(lowerCAmelCase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_: List[Any] ={
(key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**lowerCAmelCase )
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=lowerCAmelCase , allow_unicode=lowerCAmelCase , encoding="""utf-8""" , ).decode("""utf-8""" )
_UpperCAmelCase = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_UpperCAmelCase = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
_UpperCAmelCase = ap.parse_args()
_UpperCAmelCase = Path(args.readme_filepath)
_UpperCAmelCase = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
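# A minimal sketch of the front-matter split performed by
# _split_yaml_from_readme above; the README text is a made-up example.
_example_readme = "---\npretty_name: Demo\n---\n# My dataset"
_yaml_block, _body = _split_yaml_from_readme(_example_readme)
assert _yaml_block == "pretty_name: Demo"
assert _body == "# My dataset"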
| 36
| 0
|
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def carrier_concentration(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
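# A worked example: with sigma = n * e * mu, passing mobility=0 asks the
# function to solve for it, giving mu = sigma / (n * e).
if __name__ == "__main__":
    name, value = carrier_concentration(conductivity=1000, electron_conc=10**20, mobility=0)
    print(name, value)  # mobility 62.418...  (= 1000 / (1e20 * 1.6021e-19))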
| 701
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the sklearn Bunch into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the iris dataset and split it into train/test sets
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
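# Beyond the confusion matrix, a quick accuracy check on a fresh split; a
# minimal sketch using the helpers defined above and sklearn's score API.
if __name__ == "__main__":
    _features, _targets = data_handling(load_iris())
    _x_tr, _x_te, _y_tr, _y_te = train_test_split(
        _features, _targets, test_size=0.25, random_state=0
    )
    _clf = xgboost(_x_tr, _y_tr)
    print(f"Test accuracy: {_clf.score(_x_te, _y_te):.3f}")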
| 36
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] ="""huggingface/label-files"""
SCREAMING_SNAKE_CASE_: Optional[Any] ="""imagenet-1k-id2label.json"""
SCREAMING_SNAKE_CASE_: str =json.load(open(hf_hub_download(lowercase , lowercase , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE_: Tuple ={int(lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_: Optional[Any] ={v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_: List[str] ="""std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
SCREAMING_SNAKE_CASE_: Optional[int] =BitConfig(
conv_layer=lowercase , num_labels=1000 , idalabel=lowercase , labelaid=lowercase , )
return config
def __magic_name__ ( lowercase ):
if "stem.conv" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
SCREAMING_SNAKE_CASE_: Optional[int] ="""bit.""" + name
if "bit" not in name and "classifier" not in name:
SCREAMING_SNAKE_CASE_: Any ="""bit.encoder.""" + name
return name
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
SCREAMING_SNAKE_CASE_: List[Any] =Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase=False ):
SCREAMING_SNAKE_CASE_: List[Any] =get_config(lowercase )
# load original model from timm
SCREAMING_SNAKE_CASE_: str =create_model(lowercase , pretrained=lowercase )
timm_model.eval()
# load state_dict of original model
SCREAMING_SNAKE_CASE_: int =timm_model.state_dict()
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: Tuple =state_dict.pop(lowercase )
SCREAMING_SNAKE_CASE_: Any =val.squeeze() if """head""" in key else val
# load HuggingFace model
SCREAMING_SNAKE_CASE_: int =BitForImageClassification(lowercase )
model.eval()
model.load_state_dict(lowercase )
# create image processor
SCREAMING_SNAKE_CASE_: Optional[Any] =create_transform(**resolve_data_config({} , model=lowercase ) )
SCREAMING_SNAKE_CASE_: Tuple =transform.transforms
SCREAMING_SNAKE_CASE_: List[str] ={
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE_: int =BitImageProcessor(
do_resize=lowercase , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowercase , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
SCREAMING_SNAKE_CASE_: Optional[int] =prepare_img()
SCREAMING_SNAKE_CASE_: List[str] =transform(lowercase ).unsqueeze(0 )
SCREAMING_SNAKE_CASE_: List[Any] =processor(lowercase , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(lowercase , lowercase )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE_: str =model(lowercase )
SCREAMING_SNAKE_CASE_: List[str] =outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
SCREAMING_SNAKE_CASE_: str =timm_model(lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase , outputs.logits , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
processor.save_pretrained(lowercase )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
_UpperCAmelCase = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 702
|
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =[]
SCREAMING_SNAKE_CASE_: List[str] =[]
SCREAMING_SNAKE_CASE_: Any =[]
for rt in rc.restypes:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
SCREAMING_SNAKE_CASE_: Any ={name: i for i, name in enumerate(lowercase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor(
lowercase , dtype=torch.floataa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Any =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: Tuple =residx_atomaa_mask
SCREAMING_SNAKE_CASE_: Dict =residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
SCREAMING_SNAKE_CASE_: Dict =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Optional[int] =residx_atomaa_to_atomaa.long()
# create the corresponding mask
SCREAMING_SNAKE_CASE_: Optional[int] =torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
SCREAMING_SNAKE_CASE_: int =rc.restype_atoa[restype_letter]
SCREAMING_SNAKE_CASE_: Any =rc.residue_atoms[restype_name]
for atom_name in atom_names:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.atom_order[atom_name]
SCREAMING_SNAKE_CASE_: Dict =1
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: List[Any] =residx_atomaa_mask
return protein
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =tree_map(lambda lowercase : torch.tensor(lowercase , device=batch["""aatype"""].device ) , lowercase , np.ndarray )
SCREAMING_SNAKE_CASE_: int =tensor_tree_map(lambda lowercase : np.array(lowercase ) , make_atomaa_masks(lowercase ) )
return out
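# A minimal sketch of how the index tables built above are used: gathering
# per-residue atom14 coordinates into the padded atom37 layout. The shapes
# and the all-zero index table here are placeholder assumptions.
def _atom14_to_atom37_sketch():
    num_res = 4
    atom14_positions = torch.randn(num_res, 14, 3)  # compact per-residue layout
    residx_atom37_to_atom14 = torch.zeros(num_res, 37, dtype=torch.long)
    atom37_positions = torch.gather(
        atom14_positions, 1, residx_atom37_to_atom14[..., None].expand(-1, -1, 3)
    )  # (num_res, 37, 3); positions where the atom37 mask is 0 are meaningless
    return atom37_positions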
| 36
| 0
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase = """true"""
def __magic_name__ ( lowercase , lowercase=82 , lowercase=16 ):
set_seed(42 )
SCREAMING_SNAKE_CASE_: List[str] =RegressionModel()
SCREAMING_SNAKE_CASE_: Dict =deepcopy(lowercase )
SCREAMING_SNAKE_CASE_: str =RegressionDataset(length=lowercase )
SCREAMING_SNAKE_CASE_: Dict =DataLoader(lowercase , batch_size=lowercase )
model.to(accelerator.device )
SCREAMING_SNAKE_CASE_: Union[str, Any] =accelerator.prepare(lowercase , lowercase )
return model, ddp_model, dataloader
def __magic_name__ ( lowercase , lowercase=False ):
SCREAMING_SNAKE_CASE_: Optional[int] =AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
SCREAMING_SNAKE_CASE_: List[str] =load_dataset("""glue""" , """mrpc""" , split="""validation""" )
def tokenize_function(lowercase ):
SCREAMING_SNAKE_CASE_: int =tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase , max_length=lowercase )
return outputs
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE_: Optional[Any] =dataset.map(
lowercase , batched=lowercase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
SCREAMING_SNAKE_CASE_: Dict =tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase ):
if use_longest:
return tokenizer.pad(lowercase , padding="""longest""" , return_tensors="""pt""" )
return tokenizer.pad(lowercase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return DataLoader(lowercase , shuffle=lowercase , collate_fn=lowercase , batch_size=16 )
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =Accelerator(dispatch_batches=lowercase , split_batches=lowercase )
SCREAMING_SNAKE_CASE_: str =get_dataloader(lowercase , not dispatch_batches )
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" , return_dict=lowercase )
SCREAMING_SNAKE_CASE_: Tuple =accelerator.prepare(lowercase , lowercase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =[]
for batch in dataloader:
SCREAMING_SNAKE_CASE_: Dict =batch.values()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
SCREAMING_SNAKE_CASE_: Any =[], []
for logit, targ in logits_and_targets:
logits.append(lowercase )
targs.append(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =torch.cat(lowercase ), torch.cat(lowercase )
return logits, targs
def __magic_name__ ( lowercase , lowercase=82 , lowercase=False , lowercase=False , lowercase=16 ):
SCREAMING_SNAKE_CASE_: Tuple =get_basic_setup(lowercase , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: int =generate_predictions(lowercase , lowercase , lowercase )
assert (
len(lowercase ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowercase )}'''
def __magic_name__ ( lowercase = False , lowercase = False ):
SCREAMING_SNAKE_CASE_: Dict =evaluate.load("""glue""" , """mrpc""" )
SCREAMING_SNAKE_CASE_: List[str] =get_mrpc_setup(lowercase , lowercase )
# First do baseline
SCREAMING_SNAKE_CASE_: Tuple =setup["""no"""]
model.to(lowercase )
model.eval()
for batch in dataloader:
batch.to(lowercase )
with torch.inference_mode():
SCREAMING_SNAKE_CASE_: Optional[int] =model(**lowercase )
SCREAMING_SNAKE_CASE_: int =outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowercase , references=batch["""labels"""] )
SCREAMING_SNAKE_CASE_: Optional[int] =metric.compute()
# Then do distributed
SCREAMING_SNAKE_CASE_: str =setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
SCREAMING_SNAKE_CASE_: Optional[Any] =model(**lowercase )
SCREAMING_SNAKE_CASE_: Any =outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE_: Dict =batch["""labels"""]
SCREAMING_SNAKE_CASE_: Tuple =accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowercase , references=lowercase )
SCREAMING_SNAKE_CASE_: str =metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Optional[Any] =Accelerator(split_batches=lowercase , dispatch_batches=lowercase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(lowercase , lowercase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
SCREAMING_SNAKE_CASE_: Dict =Accelerator(split_batches=lowercase , dispatch_batches=lowercase )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(lowercase , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
SCREAMING_SNAKE_CASE_: List[str] =Accelerator()
test_torch_metrics(lowercase , 512 )
accelerator.state._reset_state()
def __magic_name__ ( lowercase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
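# A minimal sketch of the gather pattern exercised above: gather_for_metrics
# collects tensors from every process and drops the duplicate samples that
# were padded in to make the last batch divisible. In a single process it is
# effectively a pass-through, so this sketch also runs standalone.
def _gather_sketch():
    accelerator = Accelerator()
    logits = torch.randn(4, 2).to(accelerator.device)
    targets = torch.randint(0, 2, (4,)).to(accelerator.device)
    gathered_logits, gathered_targets = accelerator.gather_for_metrics((logits, targets))
    print(gathered_logits.shape, gathered_targets.shape)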
| 703
|
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCAmelCase = ["""text""", """image""", """audio"""]
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =[]
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(lowercase , lowercase ):
inputs.append(create_inputs(lowercase ) )
else:
raise ValueError(f'''Invalid type requested: {input_type}''' )
return inputs
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =[]
for output in outputs:
if isinstance(lowercase , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(lowercase , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(lowercase , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f'''Invalid output: {output}''' )
return output_types
@is_tool_test
class a :
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
SCREAMING_SNAKE_CASE_: Optional[int] =self.tool.inputs
for _input in inputs:
if isinstance(_input , lowerCAmelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
SCREAMING_SNAKE_CASE_: Any =self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: List[Any] =self.tool(*lowerCAmelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
SCREAMING_SNAKE_CASE_: str =[outputs]
self.assertListEqual(output_types(lowerCAmelCase ) , self.tool.outputs )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Tuple =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase , self.tool.outputs ):
SCREAMING_SNAKE_CASE_: int =AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase , lowerCAmelCase ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
for _input, input_type in zip(lowerCAmelCase , self.tool.inputs ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
SCREAMING_SNAKE_CASE_: Dict =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
| 36
| 0
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    Return every combination of words from word_bank that concatenates to target.
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
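# A minimal check of the tabulation above on the classic "purple" example:
# exactly two constructions exist, ["purp", "le"] and ["p", "ur", "p", "le"].
if __name__ == "__main__":
    assert len(all_construct("purple", ["purp", "p", "ur", "le", "purpl"])) == 2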
| 704
|
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the two arrays merged and sorted."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 36
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class a :
'''simple docstring'''
def __init__( self : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any]=13 , lowerCAmelCase : Union[str, Any]=7 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Tuple=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Any=99 , lowerCAmelCase : List[str]=32 , lowerCAmelCase : Dict=2 , lowerCAmelCase : Optional[int]=4 , lowerCAmelCase : Any=37 , lowerCAmelCase : List[str]="gelu" , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : List[str]=512 , lowerCAmelCase : List[str]=16 , lowerCAmelCase : Dict=2 , lowerCAmelCase : int=0.0_2 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : str=True , lowerCAmelCase : Tuple="None" , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : int=None , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =parent
SCREAMING_SNAKE_CASE_: int =batch_size
SCREAMING_SNAKE_CASE_: int =seq_length
SCREAMING_SNAKE_CASE_: str =is_training
SCREAMING_SNAKE_CASE_: Optional[int] =use_input_mask
SCREAMING_SNAKE_CASE_: List[Any] =use_token_type_ids
SCREAMING_SNAKE_CASE_: str =use_labels
SCREAMING_SNAKE_CASE_: List[Any] =vocab_size
SCREAMING_SNAKE_CASE_: Union[str, Any] =hidden_size
SCREAMING_SNAKE_CASE_: List[str] =num_hidden_layers
SCREAMING_SNAKE_CASE_: Dict =num_attention_heads
SCREAMING_SNAKE_CASE_: int =intermediate_size
SCREAMING_SNAKE_CASE_: List[Any] =hidden_act
SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: List[Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: Optional[int] =max_position_embeddings
SCREAMING_SNAKE_CASE_: Optional[int] =type_vocab_size
SCREAMING_SNAKE_CASE_: List[Any] =type_sequence_label_size
SCREAMING_SNAKE_CASE_: Optional[Any] =initializer_range
SCREAMING_SNAKE_CASE_: Dict =num_labels
SCREAMING_SNAKE_CASE_: Tuple =num_choices
SCREAMING_SNAKE_CASE_: Dict =relative_attention
SCREAMING_SNAKE_CASE_: str =position_biased_input
SCREAMING_SNAKE_CASE_: Any =pos_att_type
SCREAMING_SNAKE_CASE_: List[str] =scope
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_: List[str] =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_: Tuple =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_: Union[str, Any] =None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_: int =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_: int =None
SCREAMING_SNAKE_CASE_: Dict =None
SCREAMING_SNAKE_CASE_: Any =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: str =ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_: Dict =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_: Union[str, Any] =DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=lowerCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =TFDebertaVaModel(config=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
SCREAMING_SNAKE_CASE_: List[str] =[input_ids, input_mask]
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =TFDebertaVaForMaskedLM(config=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] ={
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : int , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self.num_labels
SCREAMING_SNAKE_CASE_: int =TFDebertaVaForSequenceClassification(config=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple ={
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
SCREAMING_SNAKE_CASE_: Optional[int] =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : int , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.num_labels
SCREAMING_SNAKE_CASE_: Optional[int] =TFDebertaVaForTokenClassification(config=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any ={
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =TFDebertaVaForQuestionAnswering(config=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple ={
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.prepare_config_and_inputs()
(
SCREAMING_SNAKE_CASE_
): Optional[int] =config_and_inputs
SCREAMING_SNAKE_CASE_: Tuple ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase : Optional[int] = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase : int = (
{
'feature-extraction': TFDebertaVaModel,
'fill-mask': TFDebertaVaForMaskedLM,
'question-answering': TFDebertaVaForQuestionAnswering,
'text-classification': TFDebertaVaForSequenceClassification,
'token-classification': TFDebertaVaForTokenClassification,
'zero-shot': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : str = False
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =TFDebertaVaModelTester(self )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 )
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase )
def lowerCamelCase__ ( self : List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase )
@slow
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
self.assertIsNotNone(lowerCAmelCase )
@require_tf
class a ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason="""Model not available yet""" )
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
SCREAMING_SNAKE_CASE_: Dict =tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
SCREAMING_SNAKE_CASE_: Tuple =tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
SCREAMING_SNAKE_CASE_: List[str] =model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_: Optional[int] =tf.constant(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1E-4 )
| 705
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self : Any , lowerCAmelCase : Any , lowerCAmelCase : List[str]=13 , lowerCAmelCase : Dict=3 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=224 , lowerCAmelCase : List[str]=1000 , lowerCAmelCase : Optional[Any]=[3, 3, 6, 4] , lowerCAmelCase : int=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =parent
SCREAMING_SNAKE_CASE_: Any =batch_size
SCREAMING_SNAKE_CASE_: Tuple =num_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] =is_training
SCREAMING_SNAKE_CASE_: Tuple =use_labels
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[Any] =num_labels
SCREAMING_SNAKE_CASE_: int =image_size
SCREAMING_SNAKE_CASE_: Optional[Any] =layer_depths
SCREAMING_SNAKE_CASE_: List[Any] =embed_dims
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: List[str] =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_: Tuple =self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1E-5 , )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Any =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.num_labels
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE_: int =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): str =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_: Tuple ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[int] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCamelCase : Tuple = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Any = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Dict = False
UpperCamelCase : List[str] = False
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: int =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Any =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Tuple =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Optional[Any] =SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_: Optional[Any] =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: List[str] =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Dict =outputs.hidden_states
SCREAMING_SNAKE_CASE_: List[Any] =8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: Any =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
def _config_zero_init(lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_: Dict =copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1E-10 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple =_config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: List[Any] =_config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =self.default_image_processor
SCREAMING_SNAKE_CASE_: int =prepare_img()
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(**lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 36
| 0
|
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( lowercase , lowercase ):
if b == 0:
return (1, 0)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =extended_euclid(lowercase , a % b )
SCREAMING_SNAKE_CASE_: Any =a // b
return (y, x - k * y)
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =extended_euclid(lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =na * na
SCREAMING_SNAKE_CASE_: int =ra * x * na + ra * y * na
return (n % m + m) % m
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =extended_euclid(lowercase , lowercase )
if b < 0:
SCREAMING_SNAKE_CASE_: Union[str, Any] =(b % n + n) % n
return b
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =invert_modulo(lowercase , lowercase ), invert_modulo(lowercase , lowercase )
SCREAMING_SNAKE_CASE_: Any =na * na
SCREAMING_SNAKE_CASE_: Optional[Any] =ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
| 706
|
"""simple docstring"""
from math import pi
def __magic_name__ ( lowercase , lowercase ):
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
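# Added worked example (assumption: the function above is arc_length(radius, angle),
# i.e. the length of a circular arc is 2 * pi * radius * (angle / 360)):
# a quarter circle of radius 10 has arc length 2 * pi * 10 * (90 / 360) = 5 * pi ~ 15.71.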
| 36
| 0
|
"""simple docstring"""
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
_UpperCAmelCase = get_logger(__name__)
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : Optional[str] = None ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =(
os.path.join(lowerCAmelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
SCREAMING_SNAKE_CASE_: Dict =Extractor
def lowerCamelCase__ ( self : int , lowerCAmelCase : str ) -> str:
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
SCREAMING_SNAKE_CASE_: Union[str, Any] =os.path.abspath(lowerCAmelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(lowerCAmelCase ) )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : bool ) -> bool:
'''simple docstring'''
return force_extract or (
not os.path.isfile(lowerCAmelCase ) and not (os.path.isdir(lowerCAmelCase ) and os.listdir(lowerCAmelCase ))
)
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : bool = False ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =self.extractor.infer_extractor_format(lowerCAmelCase )
if not extractor_format:
return input_path
SCREAMING_SNAKE_CASE_: int =self._get_output_path(lowerCAmelCase )
if self._do_extract(lowerCAmelCase , lowerCAmelCase ):
self.extractor.extract(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return output_path
class a ( UpperCAmelCase__ ):
@classmethod
@abstractmethod
def lowerCamelCase__ ( cls : Any , lowerCAmelCase : Union[Path, str] , **lowerCAmelCase : List[Any] ) -> bool:
'''simple docstring'''
...
@staticmethod
@abstractmethod
def lowerCamelCase__ ( lowerCAmelCase : Union[Path, str] , lowerCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
...
class a ( UpperCAmelCase__ , UpperCAmelCase__ ):
UpperCamelCase : List[bytes] = []
@staticmethod
def lowerCamelCase__ ( lowerCAmelCase : Union[Path, str] , lowerCAmelCase : int ) -> Dict:
'''simple docstring'''
with open(lowerCAmelCase , """rb""" ) as f:
return f.read(lowerCAmelCase )
@classmethod
def lowerCamelCase__ ( cls : Tuple , lowerCAmelCase : Union[Path, str] , lowerCAmelCase : bytes = b"" ) -> bool:
'''simple docstring'''
if not magic_number:
SCREAMING_SNAKE_CASE_: str =max(len(lowerCAmelCase ) for cls_magic_number in cls.magic_numbers )
try:
SCREAMING_SNAKE_CASE_: Any =cls.read_magic_number(lowerCAmelCase , lowerCAmelCase )
except OSError:
return False
return any(magic_number.startswith(lowerCAmelCase ) for cls_magic_number in cls.magic_numbers )
class a ( UpperCAmelCase__ ):
@classmethod
def lowerCamelCase__ ( cls : str , lowerCAmelCase : Union[Path, str] , **lowerCAmelCase : Optional[Any] ) -> bool:
'''simple docstring'''
return tarfile.is_tarfile(lowerCAmelCase )
@staticmethod
def lowerCamelCase__ ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
def resolved(lowerCAmelCase : str ) -> str:
return os.path.realpath(os.path.abspath(lowerCAmelCase ) )
def badpath(lowerCAmelCase : str , lowerCAmelCase : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(lowerCAmelCase , lowerCAmelCase ) ).startswith(lowerCAmelCase )
def badlink(lowerCAmelCase : List[Any] , lowerCAmelCase : str ) -> bool:
# Links are interpreted relative to the directory containing the link
SCREAMING_SNAKE_CASE_: str =resolved(os.path.join(lowerCAmelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =resolved(lowerCAmelCase )
for finfo in members:
if badpath(finfo.name , lowerCAmelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(lowerCAmelCase , lowerCAmelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(lowerCAmelCase , lowerCAmelCase ):
logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def lowerCamelCase__ ( lowerCAmelCase : Union[Path, str] , lowerCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =tarfile.open(lowerCAmelCase )
tar_file.extractall(lowerCAmelCase , members=TarExtractor.safemembers(lowerCAmelCase , lowerCAmelCase ) )
tar_file.close()
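# Added illustrative sketch (helper name is an assumption): the safemembers
# generator above guards against tar path traversal ("tar slip") - every member
# path is resolved, and anything escaping the destination directory is skipped.
# The core check, in isolation:
def _is_within_directory(base: str, target: str) -> bool:
    base = os.path.realpath(os.path.abspath(base))
    resolved = os.path.realpath(os.path.abspath(os.path.join(base, target)))
    return resolved.startswith(base)
# _is_within_directory("/tmp/out", "data/file.txt")     -> True
# _is_within_directory("/tmp/out", "../../etc/passwd")  -> False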
class a ( UpperCAmelCase__ ):
UpperCamelCase : str = [B'\x1F\x8B']
@staticmethod
def lowerCamelCase__ ( lowerCAmelCase : Union[Path, str] , lowerCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
with gzip.open(lowerCAmelCase , """rb""" ) as gzip_file:
with open(lowerCAmelCase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowerCAmelCase , lowerCAmelCase )
class a ( UpperCAmelCase__ ):
UpperCamelCase : Dict = [
B'PK\x03\x04',
B'PK\x05\x06', # empty archive
B'PK\x07\x08', # spanned archive
]
@classmethod
def lowerCamelCase__ ( cls : List[Any] , lowerCAmelCase : Union[Path, str] , lowerCAmelCase : bytes = b"" ) -> bool:
'''simple docstring'''
if super().is_extractable(lowerCAmelCase , magic_number=lowerCAmelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(lowerCAmelCase , """rb""" ) as fp:
SCREAMING_SNAKE_CASE_: int =_EndRecData(lowerCAmelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
SCREAMING_SNAKE_CASE_: List[Any] =fp.read(lowerCAmelCase ) # CD is where we expect it to be
if len(lowerCAmelCase ) == sizeCentralDir:
SCREAMING_SNAKE_CASE_: Tuple =struct.unpack(lowerCAmelCase , lowerCAmelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def lowerCamelCase__ ( lowerCAmelCase : Union[Path, str] , lowerCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
with zipfile.ZipFile(lowerCAmelCase , """r""" ) as zip_file:
zip_file.extractall(lowerCAmelCase )
zip_file.close()
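# Added illustrative sketch (helper name is an assumption): like the other
# extractors, zip detection starts from the file's magic number - the first
# bytes of a zip archive are one of the PK signatures listed above. A minimal
# standalone check:
def _looks_like_zip(path: str) -> bool:
    with open(path, "rb") as f:
        prefix = f.read(4)
    return prefix.startswith((b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08"))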
class a ( UpperCAmelCase__ ):
UpperCamelCase : List[Any] = [B'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def lowerCamelCase__ ( lowerCAmelCase : Union[Path, str] , lowerCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
with lzma.open(lowerCAmelCase ) as compressed_file:
with open(lowerCAmelCase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowerCAmelCase , lowerCAmelCase )
class a ( UpperCAmelCase__ ):
UpperCamelCase : List[str] = [B'Rar!\x1a\x07\x00', B'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def lowerCamelCase__ ( lowerCAmelCase : Union[Path, str] , lowerCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =rarfile.RarFile(lowerCAmelCase )
rf.extractall(lowerCAmelCase )
rf.close()
class a ( UpperCAmelCase__ ):
UpperCamelCase : List[str] = [B'\x28\xb5\x2F\xFD']
@staticmethod
def lowerCamelCase__ ( lowerCAmelCase : Union[Path, str] , lowerCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
SCREAMING_SNAKE_CASE_: int =zstd.ZstdDecompressor()
with open(lowerCAmelCase , """rb""" ) as ifh, open(lowerCAmelCase , """wb""" ) as ofh:
dctx.copy_stream(lowerCAmelCase , lowerCAmelCase )
class a ( UpperCAmelCase__ ):
UpperCamelCase : int = [B'\x42\x5A\x68']
@staticmethod
def lowerCamelCase__ ( lowerCAmelCase : Union[Path, str] , lowerCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
with bza.open(lowerCAmelCase , """rb""" ) as compressed_file:
with open(lowerCAmelCase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowerCAmelCase , lowerCAmelCase )
class a ( UpperCAmelCase__ ):
UpperCamelCase : int = [B'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def lowerCamelCase__ ( lowerCAmelCase : Union[Path, str] , lowerCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
with pyazr.SevenZipFile(lowerCAmelCase , """r""" ) as archive:
archive.extractall(lowerCAmelCase )
class a ( UpperCAmelCase__ ):
UpperCamelCase : Optional[int] = [B'\x04\x22\x4D\x18']
@staticmethod
def lowerCamelCase__ ( lowerCAmelCase : Union[Path, str] , lowerCAmelCase : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(lowerCAmelCase , """rb""" ) as compressed_file:
with open(lowerCAmelCase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowerCAmelCase , lowerCAmelCase )
class a :
# Put zip last: a file can be wrongly detected as zip when it is actually another format (e.g. tar or gzip)
UpperCamelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def lowerCamelCase__ ( cls : List[Any] ) -> Optional[int]:
'''simple docstring'''
return max(
len(lowerCAmelCase )
for extractor in cls.extractors.values()
if issubclass(lowerCAmelCase , lowerCAmelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def lowerCamelCase__ ( lowerCAmelCase : Union[Path, str] , lowerCAmelCase : int ) -> Tuple:
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(lowerCAmelCase , magic_number_length=lowerCAmelCase )
except OSError:
return b""
@classmethod
def lowerCamelCase__ ( cls : str , lowerCAmelCase : Union[Path, str] , lowerCAmelCase : bool = False ) -> bool:
'''simple docstring'''
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: Union[str, Any] =cls.infer_extractor_format(lowerCAmelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def lowerCamelCase__ ( cls : Any , lowerCAmelCase : Union[Path, str] ) -> str: # <Added version="2.4.0"/>
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =cls._get_magic_number_max_length()
SCREAMING_SNAKE_CASE_: Optional[Any] =cls._read_magic_number(lowerCAmelCase , lowerCAmelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(lowerCAmelCase , magic_number=lowerCAmelCase ):
return extractor_format
@classmethod
def lowerCamelCase__ ( cls : List[Any] , lowerCAmelCase : Union[Path, str] , lowerCAmelCase : Union[Path, str] , lowerCAmelCase : Optional[str] = None , lowerCAmelCase : Optional[BaseExtractor] = "deprecated" , ) -> None:
'''simple docstring'''
os.makedirs(os.path.dirname(lowerCAmelCase ) , exist_ok=lowerCAmelCase )
# Prevent parallel extractions
SCREAMING_SNAKE_CASE_: Dict =str(Path(lowerCAmelCase ).with_suffix(""".lock""" ) )
with FileLock(lowerCAmelCase ):
shutil.rmtree(lowerCAmelCase , ignore_errors=lowerCAmelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(lowerCAmelCase , lowerCAmelCase ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: Any =extractor if extractor != """deprecated""" else extractor_format
else:
SCREAMING_SNAKE_CASE_: Any =cls.extractors[extractor_format]
return extractor.extract(lowerCAmelCase , lowerCAmelCase )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=lowerCAmelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(lowerCAmelCase ):
return extractor.extract(lowerCAmelCase , lowerCAmelCase )
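# Added end-to-end sketch (simplified; names are assumptions): format inference
# above amounts to "read the longest known magic number, then return the first
# extractor whose signature matches the file prefix". A standalone version:
_MAGIC_TO_FORMAT: Dict[bytes, str] = {
    b"\x1f\x8b": "gzip",
    b"\x42\x5a\x68": "bz2",
    b"\xfd\x37\x7a\x58\x5a\x00": "xz",
    b"\x28\xb5\x2f\xfd": "zstd",
    b"PK\x03\x04": "zip",
}
def _infer_format(path: str) -> Optional[str]:
    max_len = max(len(magic) for magic in _MAGIC_TO_FORMAT)
    with open(path, "rb") as f:
        head = f.read(max_len)
    for magic, fmt in _MAGIC_TO_FORMAT.items():
        if head.startswith(magic):
            return fmt
    return None  # unknown / not a supported archive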
| 707
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Any =jax.device_count()
SCREAMING_SNAKE_CASE_: Dict =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Dict =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int ="""stabilityai/stable-diffusion-2"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase , subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxStableDiffusionPipeline.from_pretrained(
lowerCAmelCase , scheduler=lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Optional[int] =scheduler_params
SCREAMING_SNAKE_CASE_: Tuple ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.device_count()
SCREAMING_SNAKE_CASE_: Optional[Any] =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Any =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: str =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Any =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
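# Added explanatory sketch (not part of the tests): the replicate/shard/split
# calls above are the standard Flax data-parallel recipe - replicate the params
# on every device, reshape the batch so its leading axis equals the device
# count, and derive one PRNG key per device. Roughly:
#
# n_devices = jax.device_count()
# def _shard(x):  # (batch, ...) -> (n_devices, batch // n_devices, ...)
#     return x.reshape((n_devices, x.shape[0] // n_devices) + x.shape[1:])
# rngs = jax.random.split(jax.random.PRNGKey(0), n_devices)  # one key per device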
| 36
| 0
|
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =[]
SCREAMING_SNAKE_CASE_: List[str] =[]
SCREAMING_SNAKE_CASE_: Any =[]
for rt in rc.restypes:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
SCREAMING_SNAKE_CASE_: Any ={name: i for i, name in enumerate(lowercase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor(
lowercase , dtype=torch.floataa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Any =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: Tuple =residx_atomaa_mask
SCREAMING_SNAKE_CASE_: Dict =residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
SCREAMING_SNAKE_CASE_: Dict =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Optional[int] =residx_atomaa_to_atomaa.long()
# create the corresponding mask
SCREAMING_SNAKE_CASE_: Optional[int] =torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
SCREAMING_SNAKE_CASE_: int =rc.restype_atoa[restype_letter]
SCREAMING_SNAKE_CASE_: Any =rc.residue_atoms[restype_name]
for atom_name in atom_names:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.atom_order[atom_name]
SCREAMING_SNAKE_CASE_: Dict =1
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: List[Any] =residx_atomaa_mask
return protein
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =tree_map(lambda lowercase : torch.tensor(lowercase , device=batch["""aatype"""].device ) , lowercase , np.ndarray )
SCREAMING_SNAKE_CASE_: int =tensor_tree_map(lambda lowercase : np.array(lowercase ) , make_atomaa_masks(lowercase ) )
return out
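# Added illustrative note: the per-residue lookups above (e.g.
# restype_atomaa_to_atomaa[protein_aatype]) rely on advanced indexing -
# indexing a (num_restypes, num_atoms) table with a (num_res,) vector of type
# ids returns a (num_res, num_atoms) tensor, one table row per residue:
#
# table = torch.tensor([[10, 11, 12], [20, 21, 22]])  # one row per residue type
# aatype = torch.tensor([1, 0, 1])                    # type id of each residue
# table[aatype]  # tensor([[20, 21, 22], [10, 11, 12], [20, 21, 22]])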
| 708
|
"""simple docstring"""
def __magic_name__ ( lowercase = 200_0000 ):
SCREAMING_SNAKE_CASE_: List[Any] =[0 for i in range(n + 1 )]
SCREAMING_SNAKE_CASE_: Union[str, Any] =1
SCREAMING_SNAKE_CASE_: Optional[Any] =1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =1
SCREAMING_SNAKE_CASE_: Dict =0
for i in range(lowercase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
_UpperCAmelCase = {
"""sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : str = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
UpperCamelCase : Optional[Any] = 'nezha'
def __init__( self : Dict , lowerCAmelCase : List[str]=2_1128 , lowerCAmelCase : Any=768 , lowerCAmelCase : Optional[Any]=12 , lowerCAmelCase : List[str]=12 , lowerCAmelCase : Optional[Any]=3072 , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : Any=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Tuple=512 , lowerCAmelCase : Any=64 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : Union[str, Any]=0.0_2 , lowerCAmelCase : List[Any]=1E-12 , lowerCAmelCase : int=0.1 , lowerCAmelCase : int=0 , lowerCAmelCase : Any=2 , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : int=True , **lowerCAmelCase : Dict , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =vocab_size
SCREAMING_SNAKE_CASE_: Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE_: Any =num_attention_heads
SCREAMING_SNAKE_CASE_: Any =hidden_act
SCREAMING_SNAKE_CASE_: Optional[int] =intermediate_size
SCREAMING_SNAKE_CASE_: int =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Any =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int =max_position_embeddings
SCREAMING_SNAKE_CASE_: Optional[int] =max_relative_position
SCREAMING_SNAKE_CASE_: Union[str, Any] =type_vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =initializer_range
SCREAMING_SNAKE_CASE_: Any =layer_norm_eps
SCREAMING_SNAKE_CASE_: List[str] =classifier_dropout
SCREAMING_SNAKE_CASE_: str =use_cache
| 709
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_UpperCAmelCase = state_dict["""cls.predictions.decoder.weight"""]
_UpperCAmelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 36
| 0
|
"""simple docstring"""
from __future__ import annotations
import time
_UpperCAmelCase = list[tuple[int, int]]
_UpperCAmelCase = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_UpperCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class a :
def __init__( self : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Node | None ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =pos_x
SCREAMING_SNAKE_CASE_: Optional[int] =pos_y
SCREAMING_SNAKE_CASE_: List[str] =(pos_y, pos_x)
SCREAMING_SNAKE_CASE_: Any =goal_x
SCREAMING_SNAKE_CASE_: List[Any] =goal_y
SCREAMING_SNAKE_CASE_: List[str] =parent
class a :
def __init__( self : int , lowerCAmelCase : tuple[int, int] , lowerCAmelCase : tuple[int, int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =Node(start[1] , start[0] , goal[1] , goal[0] , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =Node(goal[1] , goal[0] , goal[1] , goal[0] , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =[self.start]
SCREAMING_SNAKE_CASE_: Tuple =False
def lowerCamelCase__ ( self : str ) -> Path | None:
'''simple docstring'''
while self.node_queue:
SCREAMING_SNAKE_CASE_: Dict =self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
SCREAMING_SNAKE_CASE_: Optional[Any] =True
return self.retrace_path(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =self.get_successors(lowerCAmelCase )
for node in successors:
self.node_queue.append(lowerCAmelCase )
if not self.reached:
return [self.start.pos]
return None
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Node ) -> list[Node]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =[]
for action in delta:
SCREAMING_SNAKE_CASE_: Optional[Any] =parent.pos_x + action[1]
SCREAMING_SNAKE_CASE_: Dict =parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(lowerCAmelCase , lowerCAmelCase , self.target.pos_y , self.target.pos_x , lowerCAmelCase ) )
return successors
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Node | None ) -> Path:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =node
SCREAMING_SNAKE_CASE_: int =[]
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
SCREAMING_SNAKE_CASE_: str =current_node.parent
path.reverse()
return path
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : List[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =BreadthFirstSearch(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =BreadthFirstSearch(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =False
def lowerCamelCase__ ( self : Union[str, Any] ) -> Path | None:
'''simple docstring'''
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
SCREAMING_SNAKE_CASE_: Optional[int] =self.fwd_bfs.node_queue.pop(0 )
SCREAMING_SNAKE_CASE_: List[str] =self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
SCREAMING_SNAKE_CASE_: Any =True
return self.retrace_bidirectional_path(
lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =current_bwd_node
SCREAMING_SNAKE_CASE_: Dict =current_fwd_node
SCREAMING_SNAKE_CASE_: List[str] ={
self.fwd_bfs: self.fwd_bfs.get_successors(lowerCAmelCase ),
self.bwd_bfs: self.bwd_bfs.get_successors(lowerCAmelCase ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(lowerCAmelCase )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : Node , lowerCAmelCase : Node ) -> Path:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.fwd_bfs.retrace_path(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =self.bwd_bfs.retrace_path(lowerCAmelCase )
bwd_path.pop()
bwd_path.reverse()
SCREAMING_SNAKE_CASE_: Any =fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
_UpperCAmelCase = (0, 0)
_UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_UpperCAmelCase = time.time()
_UpperCAmelCase = BreadthFirstSearch(init, goal)
_UpperCAmelCase = bfs.search()
_UpperCAmelCase = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
_UpperCAmelCase = time.time()
_UpperCAmelCase = BidirectionalBreadthFirstSearch(init, goal)
_UpperCAmelCase = bd_bfs.search()
_UpperCAmelCase = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 710
|
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
return int((input_a, input_a).count(0 ) == 0 )
def __magic_name__ ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 36
| 0
|
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class a ( UpperCAmelCase__ ):
UpperCamelCase : str = CustomTokenizer
pass
| 711
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger("""transformers.models.speecht5""")
def __magic_name__ ( lowercase , lowercase , lowercase ):
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""input_conv.weight_g"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[str] =checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: Union[str, Any] =checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[Any] =checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""output_conv.1.weight_g"""]
SCREAMING_SNAKE_CASE_: List[str] =checkpoint["""output_conv.1.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , ):
if config_path is not None:
SCREAMING_SNAKE_CASE_: List[Any] =SpeechTaHifiGanConfig.from_pretrained(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE_: Union[str, Any] =SpeechTaHifiGan(lowercase )
SCREAMING_SNAKE_CASE_: Any =torch.load(lowercase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =np.load(lowercase )
SCREAMING_SNAKE_CASE_: Any =stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE_: str =stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
model.save_pretrained(lowercase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
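# Added explanatory sketch (not part of the conversion script): the
# "weight_g"/"weight_v" checkpoint keys are weight-norm parameters; the
# effective weight is w = g * v / ||v||. Quick sanity check with PyTorch:
#
# conv = torch.nn.utils.weight_norm(torch.nn.Conv1d(4, 4, 3))
# v, g = conv.weight_v, conv.weight_g
# w = g * v / v.norm(dim=(1, 2), keepdim=True)
# assert torch.allclose(w, conv.weight, atol=1e-6)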
| 36
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_UpperCAmelCase = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = 'albert'
def __init__( self : Dict , lowerCAmelCase : List[str]=3_0000 , lowerCAmelCase : List[Any]=128 , lowerCAmelCase : List[str]=4096 , lowerCAmelCase : str=12 , lowerCAmelCase : str=1 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Dict=1_6384 , lowerCAmelCase : int=1 , lowerCAmelCase : str="gelu_new" , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : str=512 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : Union[str, Any]=1E-12 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : List[Any]="absolute" , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : int=2 , lowerCAmelCase : Optional[int]=3 , **lowerCAmelCase : int , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =embedding_size
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_size
SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE_: Any =num_hidden_groups
SCREAMING_SNAKE_CASE_: List[Any] =num_attention_heads
SCREAMING_SNAKE_CASE_: List[Any] =inner_group_num
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_act
SCREAMING_SNAKE_CASE_: int =intermediate_size
SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int =max_position_embeddings
SCREAMING_SNAKE_CASE_: Any =type_vocab_size
SCREAMING_SNAKE_CASE_: int =initializer_range
SCREAMING_SNAKE_CASE_: List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE_: Dict =classifier_dropout_prob
SCREAMING_SNAKE_CASE_: int =position_embedding_type
class a ( UpperCAmelCase__ ):
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_: str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 712
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __magic_name__ ( lowercase ):
if "cls_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def __magic_name__ ( lowercase , lowercase ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: Optional[int] =orig_state_dict.pop(lowercase )
if "qkv" in key:
SCREAMING_SNAKE_CASE_: Dict =key.split(""".""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =int(key_split[1] )
if "decoder_blocks" in key:
SCREAMING_SNAKE_CASE_: int =config.decoder_hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] ="""decoder.decoder_layers."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Dict =val[:dim, :]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: str =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: List[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: List[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Any =config.hidden_size
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""vit.encoder.layer."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim, :]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: Dict =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Any =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Tuple =val
return orig_state_dict
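# Added illustrative sketch (helper name is an assumption): timm-style
# checkpoints fuse query/key/value into one "qkv" matrix of shape
# (3 * dim, dim); the branches above slice it into three (dim, dim) blocks:
def _split_qkv(qkv_weight, dim):
    # rows [0, dim) -> query, [dim, 2*dim) -> key, [2*dim, 3*dim) -> value
    return qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]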
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =ViTMAEConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: List[Any] =1024
SCREAMING_SNAKE_CASE_: Dict =4096
SCREAMING_SNAKE_CASE_: Tuple =24
SCREAMING_SNAKE_CASE_: int =16
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Union[str, Any] =14
SCREAMING_SNAKE_CASE_: Any =1280
SCREAMING_SNAKE_CASE_: Dict =5120
SCREAMING_SNAKE_CASE_: Optional[int] =32
SCREAMING_SNAKE_CASE_: Optional[Any] =16
SCREAMING_SNAKE_CASE_: Tuple =ViTMAEForPreTraining(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )["""model"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: str =convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple ="""https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
SCREAMING_SNAKE_CASE_: List[Any] =Image.open(requests.get(lowercase , stream=lowercase ).raw )
SCREAMING_SNAKE_CASE_: int =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: int =image_processor(images=lowercase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_: Optional[Any] =model(**lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =outputs.logits
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Dict =torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
SCREAMING_SNAKE_CASE_: Any =torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 36
| 0
|
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_UpperCAmelCase = logging.get_logger(__name__)
class a :
def __init__( self : Any , lowerCAmelCase : str = None , lowerCAmelCase : uuid.UUID = None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Any=None ) -> Optional[Any]:
'''simple docstring'''
if not conversation_id:
SCREAMING_SNAKE_CASE_: Optional[Any] =uuid.uuida()
if past_user_inputs is None:
SCREAMING_SNAKE_CASE_: Tuple =[]
if generated_responses is None:
SCREAMING_SNAKE_CASE_: Any =[]
SCREAMING_SNAKE_CASE_: uuid.UUID =conversation_id
SCREAMING_SNAKE_CASE_: List[str] =past_user_inputs
SCREAMING_SNAKE_CASE_: List[str] =generated_responses
SCREAMING_SNAKE_CASE_: Optional[str] =text
def __eq__( self : Optional[int] , lowerCAmelCase : List[str] ) -> Union[str, Any]:
'''simple docstring'''
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : bool = False ) -> Union[str, Any]:
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
f'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
f'''with: "{text}".''' )
SCREAMING_SNAKE_CASE_: Optional[int] =text
else:
logger.warning(
f'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
f'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
SCREAMING_SNAKE_CASE_: int =text
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
SCREAMING_SNAKE_CASE_: Optional[int] =None
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : str ) -> Any:
'''simple docstring'''
self.generated_responses.append(lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =f'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
SCREAMING_SNAKE_CASE_: List[Any] ="""user""" if is_user else """bot"""
output += f'''{name} >> {text} \n'''
return output
@add_end_docstrings(
UpperCAmelCase__ , R'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class a ( UpperCAmelCase__ ):
def __init__( self : Tuple , *lowerCAmelCase : str , **lowerCAmelCase : str ) -> Any:
'''simple docstring'''
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
if self.tokenizer.pad_token_id is None:
SCREAMING_SNAKE_CASE_: Tuple =self.tokenizer.eos_token
def lowerCamelCase__ ( self : int , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple ={}
SCREAMING_SNAKE_CASE_: str ={}
SCREAMING_SNAKE_CASE_: str ={}
if min_length_for_response is not None:
SCREAMING_SNAKE_CASE_: List[Any] =min_length_for_response
if minimum_tokens is not None:
SCREAMING_SNAKE_CASE_: str =minimum_tokens
if "max_length" in generate_kwargs:
SCREAMING_SNAKE_CASE_: Optional[Any] =generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
SCREAMING_SNAKE_CASE_: Dict =clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowerCAmelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[int] , lowerCAmelCase : Union[Conversation, List[Conversation]] , lowerCAmelCase : Any=0 , **lowerCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =super().__call__(lowerCAmelCase , num_workers=lowerCAmelCase , **lowerCAmelCase )
if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(lowerCAmelCase ) == 1:
return outputs[0]
return outputs
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Conversation , lowerCAmelCase : List[str]=32 ) -> Dict[str, Any]:
'''simple docstring'''
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
f'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
SCREAMING_SNAKE_CASE_: List[Any] =self.tokenizer._build_conversation_input_ids(lowerCAmelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
SCREAMING_SNAKE_CASE_: List[Any] =self._legacy_parse_and_tokenize(lowerCAmelCase )
if self.framework == "pt":
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.LongTensor([input_ids] )
elif self.framework == "tf":
SCREAMING_SNAKE_CASE_: str =tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : str=10 , **lowerCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =generate_kwargs.get("""max_length""" , self.model.config.max_length )
SCREAMING_SNAKE_CASE_: str =model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
SCREAMING_SNAKE_CASE_: Tuple =max_length - minimum_tokens
SCREAMING_SNAKE_CASE_: int =model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
SCREAMING_SNAKE_CASE_: Optional[Any] =model_inputs["""attention_mask"""][:, -trim:]
SCREAMING_SNAKE_CASE_: Union[str, Any] =model_inputs.pop("""conversation""" )
SCREAMING_SNAKE_CASE_: List[Any] =max_length
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.model.generate(**lowerCAmelCase , **lowerCAmelCase )
if self.model.config.is_encoder_decoder:
SCREAMING_SNAKE_CASE_: List[str] =1
else:
SCREAMING_SNAKE_CASE_: Tuple =n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any]=True ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =model_outputs["""output_ids"""]
SCREAMING_SNAKE_CASE_: List[str] =self.tokenizer.decode(
output_ids[0] , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: Optional[Any] =model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(lowerCAmelCase )
return conversation
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Conversation ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =self.tokenizer.eos_token_id
SCREAMING_SNAKE_CASE_: List[Any] =[]
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) )
if len(lowerCAmelCase ) > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE_: List[Any] =input_ids[-self.tokenizer.model_max_length :]
return input_ids
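# Added usage sketch (the model name is an illustrative assumption): a
# Conversation collects user turns; the pipeline generates the reply and
# appends it to generated_responses.
#
# from transformers import pipeline, Conversation
# chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
# conversation = Conversation("Hi, how are you?")
# conversation = chatbot(conversation)
# print(conversation.generated_responses[-1])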
| 713
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36
| 0
|
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =sorted(numsa + numsa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =divmod(len(lowercase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of first array: """).split()]
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of second array: """).split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 714
|
"""simple docstring"""
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =False
    while is_sorted is False: # keep looping until a full double pass makes no swaps
SCREAMING_SNAKE_CASE_: Tuple =True
for i in range(0 , len(lowercase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =input_list[i + 1], input_list[i]
            # swap if the pair is out of order
SCREAMING_SNAKE_CASE_: Tuple =False
for i in range(1 , len(lowercase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =input_list[i + 1], input_list[i]
            # swap if the pair is out of order
SCREAMING_SNAKE_CASE_: str =False
return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase = [int(x) for x in input().split()]
# inputing elements of the list in one line
_UpperCAmelCase = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
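# --- Hedged reference sketch (standalone, non-interactive) -------------------
# Odd-even (brick) sort as above: alternate compare-and-swap passes over the
# even-indexed and odd-indexed pairs until a full double pass makes no swap.
def odd_even_sort_ref(values):
    swapped = True
    while swapped:
        swapped = False
        for start in (0, 1):  # even-indexed pairs, then odd-indexed pairs
            for i in range(start, len(values) - 1, 2):
                if values[i] > values[i + 1]:
                    values[i], values[i + 1] = values[i + 1], values[i]
                    swapped = True
    return values

assert odd_even_sort_ref([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]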
| 36
| 0
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase__ )
class a ( UpperCAmelCase__ ):
UpperCamelCase : str = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
UpperCamelCase : ClassVar[Features] = Features({'image': Image()} )
UpperCamelCase : ClassVar[Features] = Features({'labels': ClassLabel} )
UpperCamelCase : str = "image"
UpperCamelCase : str = "labels"
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : Any ) -> Dict:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowerCAmelCase ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
SCREAMING_SNAKE_CASE_: int =copy.deepcopy(self )
SCREAMING_SNAKE_CASE_: Optional[int] =self.label_schema.copy()
SCREAMING_SNAKE_CASE_: int =features[self.label_column]
SCREAMING_SNAKE_CASE_: Optional[int] =label_schema
return task_template
@property
def lowerCamelCase__ ( self : Optional[Any] ) -> Dict[str, str]:
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
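# --- Hedged usage sketch (shown as user code, not part of this module; assumes
# a `datasets` version that still ships task templates, where the method above
# is published as align_with_features) ----------------------------------------
from datasets import ClassLabel, Features, Image
from datasets.tasks import ImageClassification

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
task = ImageClassification(image_column="image", label_column="labels")
aligned = task.align_with_features(features)
print(aligned.label_schema["labels"].names)  # ['cat', 'dog']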
| 715
|
"""simple docstring"""
def __magic_name__ ( lowercase ):
return str(lowercase ) == str(lowercase )[::-1]
def __magic_name__ ( lowercase ):
return int(lowercase ) + int(str(lowercase )[::-1] )
def __magic_name__ ( lowercase = 1_0000 ):
SCREAMING_SNAKE_CASE_: List[str] =[]
for num in range(1 , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =0
SCREAMING_SNAKE_CASE_: int =num
while iterations < 50:
SCREAMING_SNAKE_CASE_: Optional[Any] =sum_reverse(lowercase )
iterations += 1
if is_palindrome(lowercase ):
break
else:
lychrel_nums.append(lowercase )
return len(lowercase )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 0
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __magic_name__ ( lowercase ):
if "cls_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def __magic_name__ ( lowercase , lowercase ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: Optional[int] =orig_state_dict.pop(lowercase )
if "qkv" in key:
SCREAMING_SNAKE_CASE_: Dict =key.split(""".""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =int(key_split[1] )
if "decoder_blocks" in key:
SCREAMING_SNAKE_CASE_: int =config.decoder_hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] ="""decoder.decoder_layers."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Dict =val[:dim, :]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: str =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: List[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: List[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Any =config.hidden_size
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""vit.encoder.layer."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim, :]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: Dict =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Any =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Tuple =val
return orig_state_dict
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =ViTMAEConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: List[Any] =1024
SCREAMING_SNAKE_CASE_: Dict =4096
SCREAMING_SNAKE_CASE_: Tuple =24
SCREAMING_SNAKE_CASE_: int =16
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Union[str, Any] =14
SCREAMING_SNAKE_CASE_: Any =1280
SCREAMING_SNAKE_CASE_: Dict =5120
SCREAMING_SNAKE_CASE_: Optional[int] =32
SCREAMING_SNAKE_CASE_: Optional[Any] =16
SCREAMING_SNAKE_CASE_: Tuple =ViTMAEForPreTraining(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )["""model"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: str =convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple ="""https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
SCREAMING_SNAKE_CASE_: List[Any] =Image.open(requests.get(lowercase , stream=lowercase ).raw )
SCREAMING_SNAKE_CASE_: int =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: int =image_processor(images=lowercase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_: Optional[Any] =model(**lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =outputs.logits
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Dict =torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
SCREAMING_SNAKE_CASE_: Any =torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
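# --- Hedged shape sketch --------------------------------------------------------
# The qkv-splitting step in convert_state_dict, isolated: timm-style MAE
# checkpoints store one fused (3*hidden, hidden) projection, which is split into
# separate query/key/value matrices by slicing along dim 0 (hidden=8 here is a
# hypothetical size; ViT-B actually uses 768).
import torch

hidden = 8
qkv_weight = torch.randn(3 * hidden, hidden)
query = qkv_weight[:hidden, :]
key = qkv_weight[hidden : hidden * 2, :]
value = qkv_weight[-hidden:, :]
assert query.shape == key.shape == value.shape == (hidden, hidden)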
| 716
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_UpperCAmelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""DPTFeatureExtractor"""]
_UpperCAmelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCAmelCase = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 717
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class a :
def __init__( self : str ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: list[Any] =[]
SCREAMING_SNAKE_CASE_: int =0
SCREAMING_SNAKE_CASE_: int =0
def lowerCamelCase__ ( self : Optional[Any] ) -> bool:
'''simple docstring'''
return self.head == self.tail
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
self.data.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =self.tail + 1
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.data[self.head]
SCREAMING_SNAKE_CASE_: Optional[int] =self.head + 1
return ret
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.tail - self.head
def lowerCamelCase__ ( self : str ) -> None:
'''simple docstring'''
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =data
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: int =1
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return self.data
def lowerCamelCase__ ( self : List[Any] ) -> MyNode | None:
'''simple docstring'''
return self.left
def lowerCamelCase__ ( self : Dict ) -> MyNode | None:
'''simple docstring'''
return self.right
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
return self.height
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =data
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =node
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =node
def lowerCamelCase__ ( self : int , lowerCAmelCase : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =height
def __magic_name__ ( lowercase ):
if node is None:
return 0
return node.get_height()
def __magic_name__ ( lowercase , lowercase ):
if a > b:
return a
return b
def __magic_name__ ( lowercase ):
print("""left rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: int =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
print("""right rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowercase ) )
return right_rotation(lowercase )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowercase ) )
return left_rotation(lowercase )
def __magic_name__ ( lowercase , lowercase ):
if node is None:
return MyNode(lowercase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowercase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
        ): # an imbalance detected
SCREAMING_SNAKE_CASE_: Union[str, Any] =node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
SCREAMING_SNAKE_CASE_: Any =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: List[Any] =lr_rotation(lowercase )
else:
node.set_right(insert_node(node.get_right() , lowercase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
SCREAMING_SNAKE_CASE_: Tuple =node.get_right()
assert right_child is not None
if data < right_child.get_data():
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[int] =left_rotation(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
return node
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: Dict =root.get_right()
if right_child is None:
break
SCREAMING_SNAKE_CASE_: str =right_child
return root.get_data()
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: str =root.get_left()
if left_child is None:
break
SCREAMING_SNAKE_CASE_: Dict =left_child
return root.get_data()
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: str =root.get_left()
SCREAMING_SNAKE_CASE_: List[Any] =root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_left_most(lowercase )
root.set_data(lowercase )
root.set_right(del_node(lowercase , lowercase ) )
elif left_child is not None:
SCREAMING_SNAKE_CASE_: Optional[int] =left_child
elif right_child is not None:
SCREAMING_SNAKE_CASE_: Any =right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(lowercase , lowercase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowercase , lowercase ) )
if get_height(lowercase ) - get_height(lowercase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
SCREAMING_SNAKE_CASE_: Tuple =left_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
elif get_height(lowercase ) - get_height(lowercase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
SCREAMING_SNAKE_CASE_: Optional[Any] =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: str =lr_rotation(lowercase )
SCREAMING_SNAKE_CASE_: str =my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowercase )
return root
class a :
def __init__( self : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: MyNode | None =None
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
return get_height(self.root )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""insert:""" + str(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Tuple =insert_node(self.root , lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""delete:""" + str(lowerCAmelCase ) )
if self.root is None:
print("""Tree is empty!""" )
return
SCREAMING_SNAKE_CASE_: Union[str, Any] =del_node(self.root , lowerCAmelCase )
    def __str__( self : List[str] , ) -> str: # a level-order traversal gives a more intuitive look at the tree
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =""""""
SCREAMING_SNAKE_CASE_: str =MyQueue()
q.push(self.root )
SCREAMING_SNAKE_CASE_: List[str] =self.get_height()
if layer == 0:
return output
SCREAMING_SNAKE_CASE_: int =0
while not q.is_empty():
SCREAMING_SNAKE_CASE_: int =q.pop()
SCREAMING_SNAKE_CASE_: List[Any] =""" """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
                q.push(None )
                q.push(None )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
SCREAMING_SNAKE_CASE_: List[Any] =cnt + 1
for i in range(100 ):
            if cnt == math.pow(2 , layer ) - 1:
SCREAMING_SNAKE_CASE_: int =layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def __magic_name__ ( ):
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
_UpperCAmelCase = AVLtree()
_UpperCAmelCase = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
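# --- Why the rotations above matter (worked numbers) --------------------------
# An AVL tree on n keys has height at most about 1.4405 * log2(n + 2) - 0.328,
# so one million keys fit within height 28, whereas a degenerate (unrotated)
# binary search tree built from sorted input reaches height 999_999.
import math
print(math.floor(1.4405 * math.log2(1_000_000 + 2) - 0.328))  # 28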
| 36
| 0
|
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def __magic_name__ ( lowercase ):
return "".join(sorted(lowercase ) )
def __magic_name__ ( lowercase ):
return word_by_signature[signature(lowercase )]
_lowerCAmelCase = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""")
_lowerCAmelCase = sorted({word.strip().lower() for word in data.splitlines()})
_lowerCAmelCase = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
_lowerCAmelCase = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
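# --- Hedged reference sketch (inline word list instead of words.txt) ----------
# The grouping trick above in miniature: words sharing a sorted-letter
# signature are anagrams of one another.
import collections

def signature_ref(word):
    return "".join(sorted(word))

sample_words = ["listen", "silent", "enlist", "google", "banana"]
groups = collections.defaultdict(list)
for w in sample_words:
    groups[signature_ref(w)].append(w)
assert groups["eilnst"] == ["listen", "silent", "enlist"]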
| 718
|
"""simple docstring"""
import string
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =""""""
for i in sequence:
SCREAMING_SNAKE_CASE_: List[Any] =ord(lowercase )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =string.ascii_letters
SCREAMING_SNAKE_CASE_: Tuple =string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowercase )] if c in letters else c for c in sequence )
def __magic_name__ ( ):
from timeit import timeit
print("""Running performance benchmarks...""" )
SCREAMING_SNAKE_CASE_: int ="""from string import printable ; from __main__ import atbash, atbash_slow"""
print(f'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=lowercase )} seconds''' )
print(f'''> atbash(): {timeit("atbash(printable)" , setup=lowercase )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
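# --- Alternative sketch: the same cipher via str.translate --------------------
# str.maketrans builds the substitution table once, which is typically faster
# than calling letters.index() per character as in the version above.
import string

ATBASH_TABLE = str.maketrans(
    string.ascii_lowercase + string.ascii_uppercase,
    string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1],
)
assert "ABCDEFGH".translate(ATBASH_TABLE) == "ZYXWVUTS"
assert "with space".translate(ATBASH_TABLE) == "drgs hkzxv"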
| 36
| 0
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE_: List[str] =AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =-1
SCREAMING_SNAKE_CASE_: List[str] =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE_: Optional[Any] =TextStreamer(lowerCAmelCase )
model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase , streamer=lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE_: Optional[int] =cs.out[:-1]
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE_: Dict =AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =-1
SCREAMING_SNAKE_CASE_: str =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =tokenizer.decode(greedy_ids[0] )
SCREAMING_SNAKE_CASE_: Optional[Any] =TextIteratorStreamer(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] ={"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
SCREAMING_SNAKE_CASE_: Dict =Thread(target=model.generate , kwargs=lowerCAmelCase )
thread.start()
SCREAMING_SNAKE_CASE_: Dict =""""""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE_: List[Any] =AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =-1
SCREAMING_SNAKE_CASE_: str =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =greedy_ids[:, input_ids.shape[1] :]
SCREAMING_SNAKE_CASE_: Any =tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE_: Tuple =TextStreamer(lowerCAmelCase , skip_prompt=lowerCAmelCase )
model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase , streamer=lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE_: Any =cs.out[:-1]
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =AutoTokenizer.from_pretrained("""distilgpt2""" )
SCREAMING_SNAKE_CASE_: int =AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =-1
SCREAMING_SNAKE_CASE_: Optional[int] =torch.ones((1, 5) , device=lowerCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE_: Optional[int] =TextStreamer(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
model.generate(lowerCAmelCase , max_new_tokens=1 , do_sample=lowerCAmelCase , streamer=lowerCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
SCREAMING_SNAKE_CASE_: int =cs.out[:-1] # Remove the final "\n"
SCREAMING_SNAKE_CASE_: List[str] =tokenizer(lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def lowerCamelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE_: str =AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =-1
SCREAMING_SNAKE_CASE_: List[Any] =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =TextIteratorStreamer(lowerCAmelCase , timeout=0.0_0_1 )
SCREAMING_SNAKE_CASE_: Optional[Any] ={"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
SCREAMING_SNAKE_CASE_: List[str] =Thread(target=model.generate , kwargs=lowerCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple =""""""
for new_text in streamer:
streamer_text += new_text
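# --- Hedged usage sketch outside the test harness -----------------------------
# TextIteratorStreamer consumed on the main thread while generation runs in a
# worker thread; uses the same tiny test checkpoint as the tests above
# (downloaded from the Hub on first run).
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
lm = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
streamer = TextIteratorStreamer(tok)
inputs = tok("Hello", return_tensors="pt")
Thread(target=lm.generate, kwargs={**inputs, "max_new_tokens": 5, "streamer": streamer}).start()
print("".join(chunk for chunk in streamer))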
| 719
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : List[str]=2 , lowerCAmelCase : int=3 , lowerCAmelCase : Optional[Any]=64 , lowerCAmelCase : Union[str, Any]=None ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =np.random.default_rng(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =length
SCREAMING_SNAKE_CASE_: Union[str, Any] =rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_: Tuple =a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[Any] ) -> str:
'''simple docstring'''
return self.length
def __getitem__( self : Union[str, Any] , lowerCAmelCase : Any ) -> List[str]:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class a ( torch.nn.Module ):
def __init__( self : Optional[int] , lowerCAmelCase : str=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Optional[int]=False ) -> Tuple:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: Dict =True
def lowerCamelCase__ ( self : str , lowerCAmelCase : Tuple=None ) -> int:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Union[str, Any] =False
return x * self.a[0] + self.b[0]
class a ( torch.nn.Module ):
def __init__( self : Union[str, Any] , lowerCAmelCase : Any=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : List[Any]=False ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[str] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: List[Any] =True
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : int=None ) -> Any:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Optional[int] =False
return x * self.a + self.b
def __magic_name__ ( lowercase , lowercase = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE_: Optional[int] ={"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
SCREAMING_SNAKE_CASE_: Any =load_dataset("""csv""" , data_files=lowercase )
SCREAMING_SNAKE_CASE_: Any =datasets["""train"""].unique("""label""" )
SCREAMING_SNAKE_CASE_: List[Any] ={v: i for i, v in enumerate(lowercase )}
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: Dict =tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase , max_length=lowercase , padding="""max_length""" )
if "label" in examples:
SCREAMING_SNAKE_CASE_: Optional[int] =[label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_: List[Any] =datasets.map(
lowercase , batched=lowercase , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowercase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Optional[int] =DataLoader(tokenized_datasets["""train"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=2 )
SCREAMING_SNAKE_CASE_: Dict =DataLoader(tokenized_datasets["""validation"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=1 )
return train_dataloader, eval_dataloader
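# --- Padding-strategy sketch (standalone, runs offline) -----------------------
# Why collate_fn above branches on TPU: padding every batch to one fixed
# max_length yields a single static shape (XLA-friendly), while "longest"
# pads each batch only as far as its longest member.
def pad_batch(batch, length, pad_id=0):
    return [ids + [pad_id] * (length - len(ids)) for ids in batch]

batch = [[5, 6], [7, 8, 9]]
assert pad_batch(batch, 4) == [[5, 6, 0, 0], [7, 8, 9, 0]]               # fixed length
assert pad_batch(batch, max(map(len, batch))) == [[5, 6, 0], [7, 8, 9]]  # longest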
| 36
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = """▁"""
_UpperCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
_UpperCAmelCase = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
_UpperCAmelCase = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES
UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : List[str] = ['input_ids', 'attention_mask']
def __init__( self : List[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any]="<s>" , lowerCAmelCase : List[Any]="</s>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : Optional[Any]="<s>" , lowerCAmelCase : Union[str, Any]="<unk>" , lowerCAmelCase : str="<pad>" , lowerCAmelCase : Optional[int]="<mask>" , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : List[str] , ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token
SCREAMING_SNAKE_CASE_: List[Any] ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: List[Any] =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
SCREAMING_SNAKE_CASE_: Optional[int] ={"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE_: Optional[Any] =1
SCREAMING_SNAKE_CASE_: List[str] =len(self.sp_model ) + self.fairseq_offset
SCREAMING_SNAKE_CASE_: List[Any] ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.__dict__.copy()
SCREAMING_SNAKE_CASE_: Optional[int] =None
SCREAMING_SNAKE_CASE_: Dict =self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[int] , lowerCAmelCase : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
SCREAMING_SNAKE_CASE_: List[str] ={}
SCREAMING_SNAKE_CASE_: Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_: List[str] =[self.cls_token_id]
SCREAMING_SNAKE_CASE_: Dict =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase )) + [1]
return [1] + ([0] * len(lowerCAmelCase )) + [1, 1] + ([0] * len(lowerCAmelCase )) + [1]
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =[self.sep_token_id]
SCREAMING_SNAKE_CASE_: List[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase__ ( self : str ) -> Any:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
        SCREAMING_SNAKE_CASE_: Optional[Any] ={self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE_: Dict =self.sp_model.PieceToId(lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict ="""""".join(lowerCAmelCase ).replace(lowerCAmelCase , """ """ ).strip()
return out_string
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE_: Any =os.path.join(
lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , """wb""" ) as fi:
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
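# --- Why fairseq_offset == 1 (worked mapping, no spm model required) ----------
# sentencepiece reserves ids 0-2 for <unk>/<s>/</s>, while the fairseq vocab
# reserves ids 0-3 for <s>/<pad>/</s>/<unk>; every real piece therefore shifts
# up by exactly one, and spm id 0 (<unk>) maps to the fairseq unk id.
FAIRSEQ_SPECIALS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1

def spm_id_to_fairseq_id(spm_id):
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_SPECIALS["<unk>"]

assert spm_id_to_fairseq_id(3) == 4  # first real piece "," -> fairseq id 4
assert spm_id_to_fairseq_id(0) == 3  # spm <unk> -> fairseq <unk>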
| 720
|
"""simple docstring"""
def __magic_name__ ( lowercase ):
if upper_limit < 0:
raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
SCREAMING_SNAKE_CASE_: Tuple =[0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
SCREAMING_SNAKE_CASE_: Any =1
if upper_limit > 0:
SCREAMING_SNAKE_CASE_: List[str] =1
    # Recurrence relation: C(i) = sum(C(j) * C(i-j-1)) for j = 0 to i-1
for i in range(2 , upper_limit + 1 ):
for j in range(lowercase ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
_UpperCAmelCase = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
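# --- Cross-check sketch --------------------------------------------------------
# The DP above should agree with the closed form C(n) = binom(2n, n) / (n + 1).
from math import comb

assert [comb(2 * i, i) // (i + 1) for i in range(6)] == [1, 1, 2, 5, 14, 42]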
| 36
| 0
|
"""simple docstring"""
from math import factorial
_UpperCAmelCase = {str(d): factorial(d) for d in range(1_0)}
def __magic_name__ ( lowercase ):
return sum(DIGIT_FACTORIAL[d] for d in str(lowercase ) )
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Optional[int] =7 * factorial(9 ) + 1
return sum(i for i in range(3 , lowercase ) if sum_of_digit_factorial(lowercase ) == i )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 721
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_UpperCAmelCase = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = 'albert'
def __init__( self : Dict , lowerCAmelCase : List[str]=3_0000 , lowerCAmelCase : List[Any]=128 , lowerCAmelCase : List[str]=4096 , lowerCAmelCase : str=12 , lowerCAmelCase : str=1 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Dict=1_6384 , lowerCAmelCase : int=1 , lowerCAmelCase : str="gelu_new" , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : str=512 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : Union[str, Any]=1E-12 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : List[Any]="absolute" , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : int=2 , lowerCAmelCase : Optional[int]=3 , **lowerCAmelCase : int , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =embedding_size
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_size
SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE_: Any =num_hidden_groups
SCREAMING_SNAKE_CASE_: List[Any] =num_attention_heads
SCREAMING_SNAKE_CASE_: List[Any] =inner_group_num
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_act
SCREAMING_SNAKE_CASE_: int =intermediate_size
SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int =max_position_embeddings
SCREAMING_SNAKE_CASE_: Any =type_vocab_size
SCREAMING_SNAKE_CASE_: int =initializer_range
SCREAMING_SNAKE_CASE_: List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE_: Dict =classifier_dropout_prob
SCREAMING_SNAKE_CASE_: int =position_embedding_type
class a ( UpperCAmelCase__ ):
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_: str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 36
| 0
|
from __future__ import annotations
class _snake_case :
def __init__( self : Tuple, __lowercase : int ):
lowercase__ = order
# a_{0} ... a_{k}
lowercase__ = [1.0] + [0.0] * order
# b_{0} ... b_{k}
lowercase__ = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
lowercase__ = [0.0] * self.order
# y[n-1] ... y[n-k]
lowercase__ = [0.0] * self.order
def A__ ( self : int, __lowercase : list[float], __lowercase : list[float] ):
if len(__lowercase ) < self.order:
lowercase__ = [1.0, *a_coeffs]
if len(__lowercase ) != self.order + 1:
lowercase__ = (
F'''Expected a_coeffs to have {self.order + 1} elements '''
F'''for {self.order}-order filter, got {len(__lowercase )}'''
)
raise ValueError(__lowercase )
if len(__lowercase ) != self.order + 1:
lowercase__ = (
F'''Expected b_coeffs to have {self.order + 1} elements '''
F'''for {self.order}-order filter, got {len(__lowercase )}'''
)
raise ValueError(__lowercase )
lowercase__ = a_coeffs
lowercase__ = b_coeffs
def A__ ( self : List[Any], __lowercase : float ):
lowercase__ = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1, self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
lowercase__ = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
lowercase__ = self.input_history[:-1]
lowercase__ = self.output_history[:-1]
lowercase__ = sample
lowercase__ = result
return result
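# --- Hedged numeric check (standalone first-order case) ---------------------------
# The direct-form I recurrence the class implements, reduced to order 1:
# y[n] = (b[0]*x[n] + b[1]*x[n-1] - a[1]*y[n-1]) / a[0].
def iir_step(a, b, x_prev, y_prev, sample):
    return (b[0] * sample + b[1] * x_prev - a[1] * y_prev) / a[0]

# Two-tap moving average y[n] = 0.5*x[n] + 0.5*x[n-1]:
a_coeffs, b_coeffs = [1.0, 0.0], [0.5, 0.5]
x_prev = y_prev = 0.0
out = []
for sample in [1.0, 1.0, 1.0, 0.0]:
    y = iir_step(a_coeffs, b_coeffs, x_prev, y_prev, sample)
    x_prev, y_prev = sample, y
    out.append(y)
assert out == [0.5, 1.0, 1.0, 0.5]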
| 37
|
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = len(SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE_ ):
if numbers[j] < numbers[i]:
lowercase__ , lowercase__ = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
lowercase_ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase_ = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
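# --- Hedged reference sketch (standalone, non-interactive) ------------------------
# The same O(n^2) exchange sort: compare each position with every later one
# and swap whenever the pair is out of order.
def exchange_sort_ref(numbers):
    for i in range(len(numbers)):
        for j in range(i + 1, len(numbers)):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers

assert exchange_sort_ref([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert exchange_sort_ref([]) == []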
| 37
| 1
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class _snake_case ( lowercase__):
def __init__( self : Dict, __lowercase : Tuple, __lowercase : Optional[Any]=13, __lowercase : int=7, __lowercase : Union[str, Any]=True, __lowercase : List[Any]=True, __lowercase : Optional[int]=False, __lowercase : Dict=True, __lowercase : List[Any]=99, __lowercase : str=32, __lowercase : int=5, __lowercase : int=4, __lowercase : Any=64, __lowercase : str="gelu", __lowercase : List[Any]=0.1, __lowercase : Optional[Any]=0.1, __lowercase : Any=512, __lowercase : List[str]=16, __lowercase : int=2, __lowercase : str=0.02, __lowercase : Optional[int]=3, __lowercase : str=4, __lowercase : List[Any]=None, __lowercase : str=2, __lowercase : Dict=2, __lowercase : Any=2, __lowercase : Dict=2, __lowercase : str=4, __lowercase : Union[str, Any]=1, ):
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = q_groups
lowercase__ = k_groups
lowercase__ = v_groups
lowercase__ = post_attention_groups
lowercase__ = intermediate_groups
lowercase__ = output_groups
def A__ ( self : List[Any] ):
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase__ = ids_tensor([self.batch_size], self.num_choices )
lowercase__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : Optional[int] ):
return SqueezeBertConfig(
embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, )
def A__ ( self : Optional[Any], __lowercase : Optional[Any], __lowercase : Optional[int], __lowercase : List[str], __lowercase : List[Any], __lowercase : Tuple, __lowercase : int ):
lowercase__ = SqueezeBertModel(config=__lowercase )
model.to(__lowercase )
model.eval()
lowercase__ = model(__lowercase, __lowercase )
lowercase__ = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : List[str], __lowercase : Optional[int], __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : Tuple, __lowercase : Optional[Any], __lowercase : Optional[int] ):
lowercase__ = SqueezeBertForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
lowercase__ = model(__lowercase, attention_mask=__lowercase, labels=__lowercase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : List[Any], __lowercase : int, __lowercase : Tuple, __lowercase : int, __lowercase : str, __lowercase : Union[str, Any], __lowercase : List[Any] ):
lowercase__ = SqueezeBertForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
lowercase__ = model(
__lowercase, attention_mask=__lowercase, start_positions=__lowercase, end_positions=__lowercase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def A__ ( self : Tuple, __lowercase : Optional[int], __lowercase : str, __lowercase : Any, __lowercase : List[Any], __lowercase : Dict, __lowercase : Optional[Any] ):
lowercase__ = self.num_labels
lowercase__ = SqueezeBertForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
lowercase__ = model(__lowercase, attention_mask=__lowercase, labels=__lowercase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def A__ ( self : int, __lowercase : str, __lowercase : Tuple, __lowercase : Any, __lowercase : Union[str, Any], __lowercase : Any, __lowercase : Union[str, Any] ):
lowercase__ = self.num_labels
lowercase__ = SqueezeBertForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
lowercase__ = model(__lowercase, attention_mask=__lowercase, labels=__lowercase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : int, __lowercase : Optional[Any], __lowercase : Tuple, __lowercase : Optional[int], __lowercase : List[str], __lowercase : List[str], __lowercase : Dict ):
lowercase__ = self.num_choices
lowercase__ = SqueezeBertForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase__ = model(
__lowercase, attention_mask=__lowercase, labels=__lowercase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def A__ ( self : Union[str, Any] ):
lowercase__ = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
lowercase__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( lowercase__ , lowercase__ , unittest.TestCase):
UpperCamelCase__ : Tuple =(
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase__ : List[str] =(
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ : Tuple =False
UpperCamelCase__ : Dict =True
UpperCamelCase__ : Union[str, Any] =False
def A__ ( self : Any ):
lowercase__ = SqueezeBertModelTester(self )
lowercase__ = ConfigTester(self, config_class=__lowercase, dim=37 )
def A__ ( self : Tuple ):
self.config_tester.run_common_tests()
def A__ ( self : str ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*__lowercase )
def A__ ( self : Any ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*__lowercase )
def A__ ( self : int ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*__lowercase )
def A__ ( self : Dict ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__lowercase )
def A__ ( self : Union[str, Any] ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*__lowercase )
def A__ ( self : Optional[Any] ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__lowercase )
@slow
def A__ ( self : Dict ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = SqueezeBertModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
@slow
def A__ ( self : List[Any] ):
lowercase__ = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
lowercase__ = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
lowercase__ = model(__lowercase )[0]
lowercase__ = torch.Size((1, 3) )
self.assertEqual(output.shape, __lowercase )
lowercase__ = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(__lowercase, __lowercase, atol=1e-4 ) )
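# Illustrative sketch (added for clarity; not part of the original test file):
# the slow integration test above reduces to this standalone check. The
# checkpoint name, input ids, and expected logits are taken from the test.
def _squeezebert_mnli_smoke_check():
    import torch
    from transformers import SqueezeBertForSequenceClassification

    model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
    input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
    logits = model(input_ids)[0]  # MNLI has 3 classes, so the shape is (1, 3)
    expected = torch.tensor([[0.6401, -0.0349, -0.6041]])
    assert torch.allclose(logits, expected, atol=1e-4)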
def catalan_numbers(upper_limit):
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j) * C(i - j - 1)), for j = 0 to i - 1
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
N = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F'The Catalan numbers from 0 through {N} are:')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
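# Illustrative check (added; not part of the original file): the DP table can
# be validated against the closed form C(n) = binom(2n, n) / (n + 1).
# `math.comb` requires Python 3.8+.
def _check_catalan_against_closed_form(limit: int = 10) -> None:
    from math import comb

    expected = [comb(2 * i, i) // (i + 1) for i in range(limit + 1)]
    assert catalan_numbers(limit) == expected  # [1, 1, 2, 5, 14, 42, ...]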
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self):
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> list:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image):
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format_ = image.format
    else:
        format_ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format_)
    return buffer.getvalue()
def encode_pil_image(image):
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array):
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
        )
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs):
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
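# Illustrative sketch (added; not part of the original module): how the encode
# and decode halves of the feature fit together. The class above corresponds
# to `datasets.Image`; "example.png" is a hypothetical local file.
def _image_round_trip_demo():
    import PIL.Image
    from datasets import Image

    feature = Image()  # decode=True by default
    encoded = feature.encode_example("example.png")  # {"path": "example.png", "bytes": None}
    decoded = feature.decode_example(encoded)  # a PIL.Image.Image instance
    assert isinstance(decoded, PIL.Image.Image)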
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
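# Illustrative sketch (added; not part of the original file): the file above is
# the standard transformers lazy-import pattern. `_import_structure` maps
# submodule names to symbol lists, and the module object is swapped for a lazy
# proxy that imports a submodule only when one of its symbols is first
# accessed, so importing the package never pulls in torch/tf/flax eagerly.
# A stripped-down version of the idea:
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    """Minimal stand-in for transformers' _LazyModule (illustration only)."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")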
def depth_first_search(grid, row, col, visit):
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
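# Illustrative usage (added; not part of the original file): count simple paths
# from the top-left to the bottom-right corner; 1 marks a blocked cell.
def _count_paths_demo() -> int:
    maze = [
        [0, 0, 0],
        [0, 1, 0],
        [0, 0, 0],
    ]
    # The open cells form a ring around the blocked center, so there are
    # exactly 2 simple paths: clockwise and counterclockwise.
    return depth_first_search(maze, 0, 0, set())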
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
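# Illustrative usage (added; not part of the original script): the converter is
# driven from the command line. Flag names come from the argparse setup above;
# the script filename is hypothetical.
#
#   python convert_bert_pytorch_checkpoint_to_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path /path/to/pytorch_model.bin \
#       --tf_cache_dir /path/to/tf_checkpoints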
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
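# Illustrative check (added; not part of the original file): 220 and 284 form
# the smallest amicable pair, so each maps to the other under sum_of_divisors.
def _amicable_pair_check() -> None:
    assert sum_of_divisors(220) == 284
    assert sum_of_divisors(284) == 220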
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowercase_ = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")
    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
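# Illustrative usage (added; not part of the original module): `require_version`
# accepts pip-style requirement strings, optionally with several ranges.
#
#   require_version("numpy")              # any installed version
#   require_version("tokenizers==0.9.4")  # exact pin
#   require_version("torch>=1.9,<2.0")    # comma-separated range
#   require_version("python>=3.8")        # special-cased against sys.version_info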
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True):
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__ , lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowercase__ = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models )
lowercase__ = config_class.from_json_file(SCREAMING_SNAKE_CASE_ )
lowercase__ = True
lowercase__ = True
print(f'''Building TensorFlow model from configuration: {config}''' )
lowercase__ = model_class(SCREAMING_SNAKE_CASE_ )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
lowercase__ = cached_file(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowercase__ = load_pytorch_checkpoint_in_tfa_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if compare_with_pt_model:
lowercase__ = tf_model(tf_model.dummy_inputs , training=SCREAMING_SNAKE_CASE_ ) # build the network
lowercase__ = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )
lowercase__ = pt_model_class.from_pretrained(
pretrained_model_name_or_path=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ , state_dict=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
lowercase__ = pt_model(**pt_model.dummy_inputs )
lowercase__ = pto[0].numpy()
lowercase__ = tfo[0].numpy()
lowercase__ = np.amax(np.abs(np_pt - np_tf ) )
print(f'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}'''
# Save pytorch-model
print(f'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(SCREAMING_SNAKE_CASE_ , save_format="h5" )
def convert_all_pt_checkpoints_to_tf(args_model_type, tf_dump_path, model_shortcut_names_or_path=None, config_shortcut_names_or_path=None, compare_with_pt_model=False, use_cached_models=False, remove_cached_files=False, only_convert_finetuned_models=False,):
if args_model_type is None:
lowercase__ = list(MODEL_CLASSES.keys() )
else:
lowercase__ = [args_model_type]
for j, model_type in enumerate(SCREAMING_SNAKE_CASE_ , start=1 ):
print("=" * 100 )
print(f''' Converting model type {j}/{len(SCREAMING_SNAKE_CASE_ )}: {model_type}''' )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowercase__ = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowercase__ = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
lowercase__ = model_shortcut_name
elif only_convert_finetuned_models:
print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
f''' Converting checkpoint {i}/{len(SCREAMING_SNAKE_CASE_ )}: {model_shortcut_name} - model_type {model_type}''' )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
lowercase__ = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models )
else:
lowercase__ = config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowercase__ = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models )
else:
lowercase__ = model_shortcut_name
if os.path.isfile(SCREAMING_SNAKE_CASE_ ):
lowercase__ = "converted_model"
convert_pt_checkpoint_to_tf(
model_type=SCREAMING_SNAKE_CASE_ , pytorch_checkpoint_path=SCREAMING_SNAKE_CASE_ , config_file=SCREAMING_SNAKE_CASE_ , tf_dump_path=os.path.join(SCREAMING_SNAKE_CASE_ , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=SCREAMING_SNAKE_CASE_ , )
if remove_cached_files:
os.remove(SCREAMING_SNAKE_CASE_ )
os.remove(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
F'Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
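# Illustrative usage (added; not part of the original script): convert the
# checkpoints of one architecture and compare TF and PyTorch outputs. The flag
# names come from the argparse setup above; the script filename is hypothetical.
#
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --tf_dump_path /tmp/tf_dump \
#       --model_type bert \
#       --compare_with_pt_model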
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
def _flatten(__lowercase : Any ):
return list(itertools.chain(*__lowercase ) )
if equal_length:
lowercase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowercase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
lowercase__ = [np.asarray(__lowercase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
def A__ ( self : List[Any] ):
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = feat_extract_first.save_pretrained(__lowercase )[0]
check_json_file_has_correct_format(__lowercase )
lowercase__ = self.feature_extraction_class.from_pretrained(__lowercase )
lowercase__ = feat_extract_first.to_dict()
lowercase__ = feat_extract_second.to_dict()
lowercase__ = feat_extract_first.mel_filters
lowercase__ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__lowercase, __lowercase ) )
self.assertEqual(__lowercase, __lowercase )
def A__ ( self : Any ):
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = os.path.join(__lowercase, "feat_extract.json" )
feat_extract_first.to_json_file(__lowercase )
lowercase__ = self.feature_extraction_class.from_json_file(__lowercase )
lowercase__ = feat_extract_first.to_dict()
lowercase__ = feat_extract_second.to_dict()
lowercase__ = feat_extract_first.mel_filters
lowercase__ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__lowercase, __lowercase ) )
self.assertEqual(__lowercase, __lowercase )
def A__ ( self : Optional[Any] ):
# Tests that all calls wrap to encode_plus and batch_encode_plus
lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowercase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
lowercase__ = [np.asarray(__lowercase ) for speech_input in speech_inputs]
# Test feature size
lowercase__ = feature_extractor(__lowercase, padding="max_length", return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
lowercase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
lowercase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(__lowercase, __lowercase, atol=1e-3 ) )
# Test batched
lowercase__ = feature_extractor(__lowercase, return_tensors="np" ).input_features
lowercase__ = feature_extractor(__lowercase, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__lowercase, __lowercase ):
self.assertTrue(np.allclose(__lowercase, __lowercase, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowercase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowercase__ = np.asarray(__lowercase )
lowercase__ = feature_extractor(__lowercase, return_tensors="np" ).input_features
lowercase__ = feature_extractor(__lowercase, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__lowercase, __lowercase ):
self.assertTrue(np.allclose(__lowercase, __lowercase, atol=1e-3 ) )
# Test truncation required
lowercase__ = [floats_list((1, x) )[0] for x in range(200, (feature_extractor.n_samples + 500), 200 )]
lowercase__ = [np.asarray(__lowercase ) for speech_input in speech_inputs]
lowercase__ = [x[: feature_extractor.n_samples] for x in speech_inputs]
lowercase__ = [np.asarray(__lowercase ) for speech_input in speech_inputs_truncated]
lowercase__ = feature_extractor(__lowercase, return_tensors="np" ).input_features
lowercase__ = feature_extractor(__lowercase, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__lowercase, __lowercase ):
self.assertTrue(np.allclose(__lowercase, __lowercase, atol=1e-3 ) )
def A__ ( self : Dict ):
import torch
lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ = np.random.rand(100, 32 ).astype(np.floataa )
lowercase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowercase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowercase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def A__ ( self : Any, __lowercase : Optional[int] ):
lowercase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
lowercase__ = ds.sort("id" ).select(range(__lowercase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def A__ ( self : Optional[int] ):
# fmt: off
lowercase__ = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
lowercase__ = self._load_datasamples(1 )
lowercase__ = WhisperFeatureExtractor()
lowercase__ = feature_extractor(__lowercase, return_tensors="pt" ).input_features
self.assertEqual(input_features.shape, (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30], __lowercase, atol=1e-4 ) )
def A__ ( self : Any ):
lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ = self._load_datasamples(1 )[0]
lowercase__ = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
lowercase__ = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=__lowercase )[0]
self.assertTrue(np.all(np.mean(__lowercase ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__lowercase ) - 1 ) < 1e-3 ) )
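# Illustrative sketch (added; not part of the original test file): the core
# call pattern exercised above. Whisper's feature extractor pads or truncates
# raw audio to 30 seconds and returns a log-mel spectrogram of shape
# (batch, 80, 3000), matching the integration test's shape assertion.
def _whisper_features_demo():
    import numpy as np
    from transformers import WhisperFeatureExtractor

    extractor = WhisperFeatureExtractor()
    audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
    features = extractor(audio, return_tensors="np").input_features
    assert features.shape == (1, 80, 3000)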
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), so the powers can be
        # compared on a logarithmic scale without being computed.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
lowercase_ = """Enter the base and the power separated by a comma: """
lowercase_ , lowercase_ = map(int, input(prompt).split(""","""))
lowercase_ , lowercase_ = map(int, input(prompt).split(""","""))
# We find the log of each number, using the function res(), which takes two
# arguments.
lowercase_ = res(xa, ya)
lowercase_ = res(xa, ya)
# We check for the largest number
if resa > resa:
print("""Largest number is""", xa, """^""", ya)
elif resa > resa:
print("""Largest number is""", xa, """^""", ya)
else:
print("""Both are equal""")
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
def __init__( self : Tuple, __lowercase : Union[str, Any], __lowercase : int, __lowercase : Union[str, Any], __lowercase : str, __lowercase : List[Any], __lowercase : List[str]=0.2, __lowercase : List[str]=0.2 ):
lowercase__ = bp_numa
lowercase__ = bp_numa
lowercase__ = bp_numa
lowercase__ = conva_get[:2]
lowercase__ = conva_get[2]
lowercase__ = size_pa
lowercase__ = rate_w
lowercase__ = rate_t
lowercase__ = [
np.mat(-1 * np.random.rand(self.conva[0], self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
lowercase__ = -2 * np.random.rand(self.conva[1] ) + 1
lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1
lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1
def A__ ( self : Any, __lowercase : List[str] ):
# save model dict with pickle
lowercase__ = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(__lowercase, "wb" ) as f:
pickle.dump(__lowercase, __lowercase )
print(F'''Model saved: {save_path}''' )
@classmethod
def A__ ( cls : Dict, __lowercase : Union[str, Any] ):
# read saved model
with open(__lowercase, "rb" ) as f:
lowercase__ = pickle.load(__lowercase ) # noqa: S301
lowercase__ = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
lowercase__ = model_dic.get("size_pooling1" )
lowercase__ = model_dic.get("num_bp1" )
lowercase__ = model_dic.get("num_bp2" )
lowercase__ = model_dic.get("num_bp3" )
lowercase__ = model_dic.get("rate_weight" )
lowercase__ = model_dic.get("rate_thre" )
# create model instance
lowercase__ = CNN(__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase )
# modify model parameter
lowercase__ = model_dic.get("w_conv1" )
lowercase__ = model_dic.get("wkj" )
lowercase__ = model_dic.get("vji" )
lowercase__ = model_dic.get("thre_conv1" )
lowercase__ = model_dic.get("thre_bp2" )
lowercase__ = model_dic.get("thre_bp3" )
return conv_ins
def A__ ( self : str, __lowercase : List[Any] ):
return 1 / (1 + np.exp(-1 * x ))
def A__ ( self : List[str], __lowercase : Optional[Any] ):
return round(__lowercase, 3 )
def A__ ( self : Optional[Any], __lowercase : Dict, __lowercase : Optional[int], __lowercase : Optional[int], __lowercase : Optional[Any], __lowercase : str ):
# convolution process
lowercase__ = convs[0]
lowercase__ = convs[1]
lowercase__ = np.shape(__lowercase )[0]
# get the data slice of original image data, data_focus
lowercase__ = []
for i_focus in range(0, size_data - size_conv + 1, __lowercase ):
for j_focus in range(0, size_data - size_conv + 1, __lowercase ):
lowercase__ = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__lowercase )
# calculate the feature map of every single kernel, and save it as a list of matrices
lowercase__ = []
lowercase__ = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__lowercase ):
lowercase__ = []
for i_focus in range(len(__lowercase ) ):
lowercase__ = (
np.sum(np.multiply(data_focus[i_focus], w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__lowercase ) )
lowercase__ = np.asmatrix(__lowercase ).reshape(
__lowercase, __lowercase )
data_featuremap.append(__lowercase )
# expanding the data slice to one dimension
lowercase__ = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__lowercase ) )
lowercase__ = np.asarray(__lowercase )
return focus_list, data_featuremap
def A__ ( self : List[Any], __lowercase : Any, __lowercase : List[Any], __lowercase : Union[str, Any]="average_pool" ):
# pooling process
lowercase__ = len(featuremaps[0] )
lowercase__ = int(size_map / size_pooling )
lowercase__ = []
for i_map in range(len(__lowercase ) ):
lowercase__ = featuremaps[i_map]
lowercase__ = []
for i_focus in range(0, __lowercase, __lowercase ):
for j_focus in range(0, __lowercase, __lowercase ):
lowercase__ = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__lowercase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__lowercase ) )
lowercase__ = np.asmatrix(__lowercase ).reshape(__lowercase, __lowercase )
featuremap_pooled.append(__lowercase )
return featuremap_pooled
def A__ ( self : str, __lowercase : Optional[Any] ):
# expanding three-dimensional data to a one-dimensional list
lowercase__ = []
for i in range(len(__lowercase ) ):
lowercase__ = np.shape(data[i] )
lowercase__ = data[i].reshape(1, shapes[0] * shapes[1] )
lowercase__ = data_listed.getA().tolist()[0]
data_expanded.extend(__lowercase )
lowercase__ = np.asarray(__lowercase )
return data_expanded
def A__ ( self : Optional[int], __lowercase : Optional[int] ):
# expanding matrix to one dimension list
lowercase__ = np.asarray(__lowercase )
lowercase__ = np.shape(__lowercase )
lowercase__ = data_mat.reshape(1, shapes[0] * shapes[1] )
return data_expanded
def A__ ( self : str, __lowercase : Tuple, __lowercase : List[Any], __lowercase : Any, __lowercase : Union[str, Any], __lowercase : Tuple ):
lowercase__ = []
lowercase__ = 0
for i_map in range(__lowercase ):
lowercase__ = np.ones((size_map, size_map) )
for i in range(0, __lowercase, __lowercase ):
for j in range(0, __lowercase, __lowercase ):
lowercase__ = pd_pool[
i_pool
]
lowercase__ = i_pool + 1
lowercase__ = np.multiply(
__lowercase, np.multiply(out_map[i_map], (1 - out_map[i_map]) ) )
pd_all.append(__lowercase )
return pd_all
def A__ ( self : Tuple, __lowercase : int, __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : List[str]=bool ):
# model training
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(__lowercase )) )
print((" - - Shape: Teach_Data ", np.shape(__lowercase )) )
lowercase__ = 0
lowercase__ = []
lowercase__ = 1_0000
while rp < n_repeat and mse >= error_accuracy:
lowercase__ = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(__lowercase ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase__ = np.asmatrix(datas_train[p] )
lowercase__ = np.asarray(datas_teach[p] )
lowercase__ , lowercase__ = self.convolute(
__lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
lowercase__ = self.pooling(__lowercase, self.size_poolinga )
lowercase__ = np.shape(__lowercase )
lowercase__ = self._expand(__lowercase )
lowercase__ = data_bp_input
lowercase__ = np.dot(__lowercase, self.vji.T ) - self.thre_bpa
lowercase__ = self.sig(__lowercase )
lowercase__ = np.dot(__lowercase, self.wkj.T ) - self.thre_bpa
lowercase__ = self.sig(__lowercase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowercase__ = np.multiply(
(data_teach - bp_outa), np.multiply(__lowercase, (1 - bp_outa) ) )
lowercase__ = np.multiply(
np.dot(__lowercase, self.wkj ), np.multiply(__lowercase, (1 - bp_outa) ) )
lowercase__ = np.dot(__lowercase, self.vji )
lowercase__ = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase__ = pd_conva_pooled.T.getA().tolist()
lowercase__ = self._calculate_gradient_from_pool(
__lowercase, __lowercase, shape_featuremapa[0], shape_featuremapa[1], self.size_poolinga, )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase__ = self._expand_mat(pd_conva_all[k_conv] )
lowercase__ = self.rate_weight * np.dot(__lowercase, __lowercase )
lowercase__ = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase__ = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowercase__ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase__ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase__ = self.thre_bpa - pd_k_all * self.rate_thre
lowercase__ = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowercase__ = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase__ = rp + 1
lowercase__ = error_count / patterns
all_mse.append(__lowercase )
def draw_error():
lowercase__ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__lowercase, "+-" )
plt.plot(__lowercase, "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(__lowercase, alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def A__ ( self : List[str], __lowercase : Optional[int] ):
# model prediction
lowercase__ = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(__lowercase )) )
for p in range(len(__lowercase ) ):
lowercase__ = np.asmatrix(datas_test[p] )
lowercase__ , lowercase__ = self.convolute(
__lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
lowercase__ = self.pooling(__lowercase, self.size_poolinga )
lowercase__ = self._expand(__lowercase )
lowercase__ = data_bp_input
lowercase__ = bp_outa * self.vji.T - self.thre_bpa
lowercase__ = self.sig(__lowercase )
lowercase__ = bp_outa * self.wkj.T - self.thre_bpa
lowercase__ = self.sig(__lowercase )
produce_out.extend(bp_outa.getA().tolist() )
lowercase__ = [list(map(self.do_round, __lowercase ) ) for each in produce_out]
return np.asarray(__lowercase )
def A__ ( self : int, __lowercase : Any ):
# return the image data after the convolution process so we can inspect it
lowercase__ = np.asmatrix(__lowercase )
lowercase__ , lowercase__ = self.convolute(
__lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
lowercase__ = self.pooling(__lowercase, self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
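# --- A minimal, self-contained sketch of the sliding-window step that
# `convolute` above performs: move a `size_conv` x `size_conv` window across
# the input with stride `conv_step`, take the weighted sum minus a threshold,
# and squash it with a sigmoid. `demo_convolute` and the random inputs are
# illustrative inventions, not part of the class above.
def demo_convolute(data, kernel, thre, conv_step=1):
    size_data, size_conv = data.shape[0], kernel.shape[0]
    size_map = (size_data - size_conv) // conv_step + 1
    feature_map = np.empty((size_map, size_map))
    for i in range(size_map):
        for j in range(size_map):
            focus = data[i * conv_step : i * conv_step + size_conv,
                         j * conv_step : j * conv_step + size_conv]
            # weighted sum minus threshold, then sigmoid
            feature_map[i, j] = 1 / (1 + np.exp(-(np.sum(focus * kernel) - thre)))
    return feature_map

# e.g.: demo_convolute(np.random.rand(6, 6), np.random.rand(3, 3), thre=0.5)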
from __future__ import annotations
import numpy as np
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
return np.maximum(0 , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
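# Note: `np.maximum` broadcasts, so the implementation above also applies
# elementwise to multi-dimensional inputs, e.g.:
#   print(relu(np.array([[-2.0, 3.0], [0.5, -0.1]])))  # --> [[0.  3. ] [0.5 0. ]]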
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = "huggingface/label-files"
lowercase__ = "imagenet-1k-id2label.json"
lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
lowercase__ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
lowercase__ = BitConfig(
conv_layer=SCREAMING_SNAKE_CASE_ , num_labels=1000 , idalabel=SCREAMING_SNAKE_CASE_ , labelaid=SCREAMING_SNAKE_CASE_ , )
return config
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
if "stem.conv" in name:
lowercase__ = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
lowercase__ = name.replace("blocks" , "layers" )
if "head.fc" in name:
lowercase__ = name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
lowercase__ = "bit." + name
if "bit" not in name and "classifier" not in name:
lowercase__ = "bit.encoder." + name
return name
def __lowerCAmelCase ( ):
lowercase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
lowercase__ = get_config(SCREAMING_SNAKE_CASE_ )
# load original model from timm
lowercase__ = create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ )
timm_model.eval()
# load state_dict of original model
lowercase__ = timm_model.state_dict()
for key in state_dict.copy().keys():
lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ )
lowercase__ = val.squeeze() if "head" in key else val
# load HuggingFace model
lowercase__ = BitForImageClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# create image processor
lowercase__ = create_transform(**resolve_data_config({} , model=SCREAMING_SNAKE_CASE_ ) )
lowercase__ = transform.transforms
lowercase__ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
lowercase__ = BitImageProcessor(
do_resize=SCREAMING_SNAKE_CASE_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=SCREAMING_SNAKE_CASE_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=SCREAMING_SNAKE_CASE_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowercase__ = prepare_img()
lowercase__ = transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 )
lowercase__ = processor(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# verify logits
with torch.no_grad():
lowercase__ = model(SCREAMING_SNAKE_CASE_ )
lowercase__ = outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
lowercase__ = timm_model(SCREAMING_SNAKE_CASE_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
lowercase_ = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
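# Example invocation (the script filename and output path are assumptions):
#   python convert_bit_to_pytorch.py \
#       --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-converted \
#       --push_to_hub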
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowercase_ = logging.get_logger(__name__)
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = r"\w+[.]\d+"
lowercase__ = re.findall(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for pat in pats:
lowercase__ = key.replace(SCREAMING_SNAKE_CASE_ , "_".join(pat.split("." ) ) )
return key
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowercase__ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowercase__ = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowercase__ = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowercase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowercase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowercase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
lowercase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowercase__ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowercase__ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=42 ):
# Step 1: Convert pytorch tensor to numpy
lowercase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowercase__ = flax_model.init_weights(PRNGKey(SCREAMING_SNAKE_CASE_ ) )
lowercase__ = flatten_dict(SCREAMING_SNAKE_CASE_ )
lowercase__ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase__ = rename_key(SCREAMING_SNAKE_CASE_ )
lowercase__ = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
lowercase__ , lowercase__ = rename_key_and_reshape_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
lowercase__ = jnp.asarray(SCREAMING_SNAKE_CASE_ )
return unflatten_dict(SCREAMING_SNAKE_CASE_ )
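# --- A minimal sketch of the layout change `rename_key_and_reshape_tensor`
# applies. PyTorch stores conv weights as (out_ch, in_ch, kH, kW) while Flax
# expects (kH, kW, in_ch, out_ch); linear weights go from (out, in) to (in, out).
# The zero tensors below are illustrative only.
import numpy as np

pt_conv = np.zeros((64, 3, 7, 7))          # (out_ch, in_ch, kH, kW)
flax_conv = pt_conv.transpose(2, 3, 1, 0)  # -> (kH, kW, in_ch, out_ch)
assert flax_conv.shape == (7, 7, 3, 64)

pt_linear = np.zeros((128, 64))            # (out_features, in_features)
flax_linear = pt_linear.T                  # -> (in_features, out_features)
assert flax_linear.shape == (64, 128)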
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class _snake_case ( lowercase__):
def __init__( self : Optional[Any], __lowercase : str = "▁", __lowercase : bool = True, __lowercase : Union[str, AddedToken] = "<unk>", __lowercase : Union[str, AddedToken] = "</s>", __lowercase : Union[str, AddedToken] = "<pad>", ):
lowercase__ = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
lowercase__ = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
lowercase__ = token_dict["token"]
lowercase__ = Tokenizer(Unigram() )
lowercase__ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}" ), " " ),
normalizers.Lowercase(),
] )
lowercase__ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__lowercase, add_prefix_space=__lowercase ),
pre_tokenizers.Digits(individual_digits=__lowercase ),
pre_tokenizers.Punctuation(),
] )
lowercase__ = decoders.Metaspace(replacement=__lowercase, add_prefix_space=__lowercase )
lowercase__ = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''', special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])], )
lowercase__ = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(__lowercase, __lowercase )
def A__ ( self : Union[str, Any], __lowercase : Union[str, List[str]], __lowercase : int = 8000, __lowercase : bool = True, ):
lowercase__ = trainers.UnigramTrainer(
vocab_size=__lowercase, special_tokens=self.special_tokens_list, show_progress=__lowercase, )
if isinstance(__lowercase, __lowercase ):
lowercase__ = [files]
self._tokenizer.train(__lowercase, trainer=__lowercase )
self.add_unk_id()
def A__ ( self : List[Any], __lowercase : Union[Iterator[str], Iterator[Iterator[str]]], __lowercase : int = 8000, __lowercase : bool = True, ):
lowercase__ = trainers.UnigramTrainer(
vocab_size=__lowercase, special_tokens=self.special_tokens_list, show_progress=__lowercase, )
self._tokenizer.train_from_iterator(__lowercase, trainer=__lowercase )
self.add_unk_id()
def A__ ( self : str ):
lowercase__ = json.loads(self._tokenizer.to_str() )
lowercase__ = self.special_tokens["unk"]["id"]
lowercase__ = Tokenizer.from_str(json.dumps(__lowercase ) )
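# --- A minimal usage sketch for the tokenizer above, assuming the obfuscated
# class is named `SentencePieceUnigramTokenizer` (the dump hides its real name)
# and that the tiny corpus and vocab size below are arbitrary stand-ins:
#   tok = SentencePieceUnigramTokenizer()
#   tok.train_from_iterator(iter(["hello world", "hello again"]), vocab_size=30)
#   print(tok.encode("hello world").tokens)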
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self : Optional[Any], __lowercase : Tuple, __lowercase : Optional[int]=3, __lowercase : str=32, __lowercase : Any=3, __lowercase : int=10, __lowercase : Optional[Any]=[10, 20, 30, 40], __lowercase : int=[1, 1, 2, 1], __lowercase : Any=True, __lowercase : Dict=True, __lowercase : Union[str, Any]="relu", __lowercase : Optional[int]=3, __lowercase : str=None, ):
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = num_channels
lowercase__ = embeddings_size
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_act
lowercase__ = num_labels
lowercase__ = scope
lowercase__ = len(__lowercase )
def A__ ( self : Tuple ):
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels
def A__ ( self : Any ):
return RegNetConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )
def A__ ( self : int, __lowercase : Tuple, __lowercase : Union[str, Any], __lowercase : str ):
lowercase__ = TFRegNetModel(config=__lowercase )
lowercase__ = model(__lowercase, training=__lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def A__ ( self : Union[str, Any], __lowercase : List[Any], __lowercase : Union[str, Any], __lowercase : Union[str, Any] ):
lowercase__ = self.num_labels
lowercase__ = TFRegNetForImageClassification(__lowercase )
lowercase__ = model(__lowercase, labels=__lowercase, training=__lowercase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def A__ ( self : Optional[Any] ):
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _snake_case ( lowercase__ , lowercase__ , unittest.TestCase):
UpperCamelCase__ : Optional[int] =(TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
UpperCamelCase__ : List[str] =(
{"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase__ : int =False
UpperCamelCase__ : str =False
UpperCamelCase__ : int =False
UpperCamelCase__ : Optional[int] =False
UpperCamelCase__ : Optional[int] =False
def A__ ( self : str ):
lowercase__ = TFRegNetModelTester(self )
lowercase__ = ConfigTester(self, config_class=__lowercase, has_text_modality=__lowercase )
def A__ ( self : Tuple ):
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def A__ ( self : Tuple ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", )
@slow
def A__ ( self : Tuple ):
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def A__ ( self : Tuple ):
pass
def A__ ( self : List[Any] ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(__lowercase )
lowercase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1], __lowercase )
def A__ ( self : List[str] ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def A__ ( self : int ):
def check_hidden_states_output(__lowercase : Dict, __lowercase : Optional[int], __lowercase : Optional[int] ):
lowercase__ = model_class(__lowercase )
lowercase__ = model(**self._prepare_for_class(__lowercase, __lowercase ), training=__lowercase )
lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ = self.model_tester.num_stages
self.assertEqual(len(__lowercase ), expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ = layer_type
lowercase__ = True
check_hidden_states_output(__lowercase, __lowercase, __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(__lowercase, __lowercase, __lowercase )
def A__ ( self : int ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(__lowercase : Any, __lowercase : int, __lowercase : Dict, __lowercase : str={} ):
lowercase__ = model(__lowercase, return_dict=__lowercase, **__lowercase )
lowercase__ = model(__lowercase, return_dict=__lowercase, **__lowercase ).to_tuple()
def recursive_check(__lowercase : List[Any], __lowercase : Dict ):
if isinstance(__lowercase, (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__lowercase, __lowercase ):
recursive_check(__lowercase, __lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__lowercase, __lowercase ) ), msg=(
"Tuple and dict output are not equal. Difference:"
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
), )
recursive_check(__lowercase, __lowercase )
for model_class in self.all_model_classes:
lowercase__ = model_class(__lowercase )
lowercase__ = self._prepare_for_class(__lowercase, __lowercase )
lowercase__ = self._prepare_for_class(__lowercase, __lowercase )
check_equivalence(__lowercase, __lowercase, __lowercase )
lowercase__ = self._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase )
lowercase__ = self._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase )
check_equivalence(__lowercase, __lowercase, __lowercase )
lowercase__ = self._prepare_for_class(__lowercase, __lowercase )
lowercase__ = self._prepare_for_class(__lowercase, __lowercase )
check_equivalence(__lowercase, __lowercase, __lowercase, {"output_hidden_states": True} )
lowercase__ = self._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase )
lowercase__ = self._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase )
check_equivalence(__lowercase, __lowercase, __lowercase, {"output_hidden_states": True} )
def A__ ( self : Any ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def A__ ( self : Tuple ):
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = TFRegNetModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def __lowerCAmelCase ( ):
lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class _snake_case ( unittest.TestCase):
@cached_property
def A__ ( self : List[Any] ):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A__ ( self : List[Any] ):
lowercase__ = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=__lowercase, return_tensors="tf" )
# forward pass
lowercase__ = model(**__lowercase, training=__lowercase )
# verify the logits
lowercase__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape, __lowercase )
lowercase__ = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3], __lowercase, atol=1e-4 )
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
lowercase__ = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
lowercase__ = f'''{src_lang}-{tgt_lang}'''
lowercase__ = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE_ , "README.md" )
print(f'''Generating {path}''' )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
# make sure we are under the root of the project
lowercase_ = Path(__file__).resolve().parent.parent.parent
lowercase_ = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowercase_ , lowercase_ , lowercase_ = model_name.split("""-""")
lowercase_ = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
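# Running this script writes one card per pair, e.g. (relative to the repo root):
#   model_cards/facebook/wmt19-ru-en/README.md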
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""microsoft/unispeech-sat-base-100h-libri-ft""": (
"""https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"""
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class _snake_case ( lowercase__):
UpperCamelCase__ : Union[str, Any] ="""unispeech-sat"""
def __init__( self : Union[str, Any], __lowercase : str=32, __lowercase : Any=768, __lowercase : Tuple=12, __lowercase : List[str]=12, __lowercase : int=3072, __lowercase : Optional[Any]="gelu", __lowercase : Tuple=0.1, __lowercase : List[Any]=0.1, __lowercase : Optional[int]=0.1, __lowercase : Optional[Any]=0.0, __lowercase : Optional[Any]=0.0, __lowercase : List[str]=0.1, __lowercase : str=0.1, __lowercase : Optional[int]=0.02, __lowercase : Optional[int]=1e-5, __lowercase : Any="group", __lowercase : Dict="gelu", __lowercase : List[Any]=(512, 512, 512, 512, 512, 512, 512), __lowercase : Dict=(5, 2, 2, 2, 2, 2, 2), __lowercase : Union[str, Any]=(10, 3, 3, 3, 3, 2, 2), __lowercase : List[Any]=False, __lowercase : Union[str, Any]=128, __lowercase : Optional[int]=16, __lowercase : Union[str, Any]=False, __lowercase : Optional[int]=True, __lowercase : List[Any]=0.05, __lowercase : Any=10, __lowercase : Tuple=2, __lowercase : Optional[Any]=0.0, __lowercase : str=10, __lowercase : List[Any]=0, __lowercase : Dict=320, __lowercase : str=2, __lowercase : Optional[int]=0.1, __lowercase : int=100, __lowercase : Any=256, __lowercase : str=256, __lowercase : Tuple=0.1, __lowercase : Dict="mean", __lowercase : Optional[Any]=False, __lowercase : Optional[Any]=False, __lowercase : Optional[Any]=256, __lowercase : Optional[int]=(512, 512, 512, 512, 1500), __lowercase : Optional[Any]=(5, 3, 3, 1, 1), __lowercase : Optional[int]=(1, 2, 3, 1, 1), __lowercase : Tuple=512, __lowercase : Tuple=0, __lowercase : str=1, __lowercase : Optional[Any]=2, __lowercase : List[str]=504, **__lowercase : Optional[int], ):
super().__init__(**__lowercase, pad_token_id=__lowercase, bos_token_id=__lowercase, eos_token_id=__lowercase )
lowercase__ = hidden_size
lowercase__ = feat_extract_norm
lowercase__ = feat_extract_activation
lowercase__ = list(__lowercase )
lowercase__ = list(__lowercase )
lowercase__ = list(__lowercase )
lowercase__ = conv_bias
lowercase__ = num_conv_pos_embeddings
lowercase__ = num_conv_pos_embedding_groups
lowercase__ = len(self.conv_dim )
lowercase__ = num_hidden_layers
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = num_attention_heads
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = feat_proj_dropout
lowercase__ = final_dropout
lowercase__ = layerdrop
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
lowercase__ = vocab_size
lowercase__ = num_clusters
lowercase__ = do_stable_layer_norm
lowercase__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ = apply_spec_augment
lowercase__ = mask_time_prob
lowercase__ = mask_time_length
lowercase__ = mask_time_min_masks
lowercase__ = mask_feature_prob
lowercase__ = mask_feature_length
lowercase__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase__ = num_codevectors_per_group
lowercase__ = num_codevector_groups
lowercase__ = contrastive_logits_temperature
lowercase__ = feat_quantizer_dropout
lowercase__ = num_negatives
lowercase__ = codevector_dim
lowercase__ = proj_codevector_dim
lowercase__ = diversity_loss_weight
# ctc loss
lowercase__ = ctc_loss_reduction
lowercase__ = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase__ = list(__lowercase )
lowercase__ = list(__lowercase )
lowercase__ = list(__lowercase )
lowercase__ = xvector_output_dim
@property
def A__ ( self : List[str] ):
return functools.reduce(operator.mul, self.conv_stride, 1 )
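# A small check of the property above: the feature extractor's total
# downsampling factor is the product of the conv strides, so the default
# strides (5, 2, 2, 2, 2, 2, 2) reduce the raw waveform by a factor of 320.
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320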
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( lowercase__ , unittest.TestCase):
UpperCamelCase__ : Dict =TransfoXLTokenizer
UpperCamelCase__ : List[Any] =False
UpperCamelCase__ : List[Any] =False
def A__ ( self : Union[str, Any] ):
super().setUp()
lowercase__ = [
"<unk>",
"[CLS]",
"[SEP]",
"want",
"unwanted",
"wa",
"un",
"running",
",",
"low",
"l",
]
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def A__ ( self : Union[str, Any], **__lowercase : Any ):
lowercase__ = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **__lowercase )
def A__ ( self : Tuple, __lowercase : Optional[int] ):
lowercase__ = "<unk> UNwanted , running"
lowercase__ = "<unk> unwanted, running"
return input_text, output_text
def A__ ( self : str ):
lowercase__ = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=__lowercase )
lowercase__ = tokenizer.tokenize("<unk> UNwanted , running" )
self.assertListEqual(__lowercase, ["<unk>", "unwanted", ",", "running"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ), [0, 4, 8, 7] )
def A__ ( self : Tuple ):
lowercase__ = TransfoXLTokenizer(lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ), ["hello", "!", "how", "are", "you", "?"] )
def A__ ( self : Tuple ):
lowercase__ = TransfoXLTokenizer(lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ), ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def A__ ( self : str ):
lowercase__ = TransfoXLTokenizer(lower_case=__lowercase )
lowercase__ = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
lowercase__ = [
"Hello",
"(",
"bracket",
")",
"and",
"side",
"@-@",
"scrolled",
"[",
"and",
"]",
"Henry",
"'s",
"$",
"5",
"@,@",
"000",
"with",
"3",
"@.@",
"34",
"m",
".",
"What",
"'s",
"up",
"!",
"?",
]
self.assertListEqual(tokenizer.tokenize(__lowercase ), __lowercase )
self.assertEqual(tokenizer.convert_tokens_to_string(__lowercase ), __lowercase )
def A__ ( self : List[str] ):
lowercase__ = self.get_tokenizer()
lowercase__ = len(__lowercase )
tokenizer.add_tokens(["new1", "new2"] )
tokenizer.move_added_token("new1", 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(__lowercase ), original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("new1" ), [1] )
self.assertEqual(tokenizer.decode([1] ), "new1" )
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class _snake_case :
def __init__( self : Tuple, __lowercase : Union[str, Any], __lowercase : Union[str, Any]=2, __lowercase : List[str]=True, __lowercase : Dict=False, __lowercase : Dict=10, __lowercase : List[Any]=3, __lowercase : Union[str, Any]=32 * 8, __lowercase : Optional[int]=32 * 8, __lowercase : List[str]=4, __lowercase : Tuple=64, ):
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = is_training
lowercase__ = use_auxiliary_loss
lowercase__ = num_queries
lowercase__ = num_channels
lowercase__ = min_size
lowercase__ = max_size
lowercase__ = num_labels
lowercase__ = hidden_dim
lowercase__ = hidden_dim
def A__ ( self : Optional[Any] ):
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowercase )
lowercase__ = torch.ones([self.batch_size, self.min_size, self.max_size], device=__lowercase )
lowercase__ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=__lowercase ) > 0.5
).float()
lowercase__ = (torch.rand((self.batch_size, self.num_labels), device=__lowercase ) > 0.5).long()
lowercase__ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A__ ( self : Dict ):
lowercase__ = MaskaFormerConfig(
hidden_size=self.hidden_dim, )
lowercase__ = self.num_queries
lowercase__ = self.num_labels
lowercase__ = [1, 1, 1, 1]
lowercase__ = self.num_channels
lowercase__ = 64
lowercase__ = 128
lowercase__ = self.hidden_dim
lowercase__ = self.hidden_dim
lowercase__ = self.hidden_dim
return config
def A__ ( self : str ):
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.prepare_config_and_inputs()
lowercase__ = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def A__ ( self : str, __lowercase : Tuple, __lowercase : int ):
lowercase__ = output.encoder_hidden_states
lowercase__ = output.pixel_decoder_hidden_states
lowercase__ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowercase ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowercase ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowercase ), config.decoder_layers )
def A__ ( self : List[Any], __lowercase : Optional[Any], __lowercase : List[str], __lowercase : List[str], __lowercase : Optional[Any]=False ):
with torch.no_grad():
lowercase__ = MaskaFormerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
lowercase__ = model(pixel_values=__lowercase, pixel_mask=__lowercase )
lowercase__ = model(__lowercase, output_hidden_states=__lowercase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowercase, __lowercase )
def A__ ( self : Union[str, Any], __lowercase : str, __lowercase : int, __lowercase : str, __lowercase : Union[str, Any], __lowercase : str ):
lowercase__ = MaskaFormerForUniversalSegmentation(config=__lowercase )
model.to(__lowercase )
model.eval()
def comm_check_on_output(__lowercase : int ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowercase__ = model(pixel_values=__lowercase, pixel_mask=__lowercase )
lowercase__ = model(__lowercase )
comm_check_on_output(__lowercase )
lowercase__ = model(
pixel_values=__lowercase, pixel_mask=__lowercase, mask_labels=__lowercase, class_labels=__lowercase )
comm_check_on_output(__lowercase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape, torch.Size([1] ) )
@require_torch
class _snake_case ( lowercase__ , lowercase__ , unittest.TestCase):
UpperCamelCase__ : Any =(MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
UpperCamelCase__ : Tuple ={"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
UpperCamelCase__ : int =False
UpperCamelCase__ : Optional[int] =False
UpperCamelCase__ : Optional[int] =False
UpperCamelCase__ : List[str] =False
def A__ ( self : List[str] ):
lowercase__ = MaskaFormerModelTester(self )
lowercase__ = ConfigTester(self, config_class=__lowercase, has_text_modality=__lowercase )
def A__ ( self : str ):
self.config_tester.run_common_tests()
def A__ ( self : Optional[Any] ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__lowercase, **__lowercase, output_hidden_states=__lowercase )
def A__ ( self : List[str] ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__lowercase )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def A__ ( self : Optional[Any] ):
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def A__ ( self : int ):
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def A__ ( self : List[str] ):
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def A__ ( self : Any ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def A__ ( self : Union[str, Any] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A__ ( self : Optional[int] ):
pass
def A__ ( self : int ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(__lowercase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1], __lowercase )
@slow
def A__ ( self : Optional[int] ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowercase__ = MaskaFormerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def A__ ( self : Optional[int] ):
lowercase__ = (self.model_tester.min_size,) * 2
lowercase__ = {
"pixel_values": torch.randn((2, 3, *size), device=__lowercase ),
"mask_labels": torch.randn((2, 10, *size), device=__lowercase ),
"class_labels": torch.zeros(2, 10, device=__lowercase ).long(),
}
lowercase__ = self.model_tester.get_config()
lowercase__ = MaskaFormerForUniversalSegmentation(__lowercase ).to(__lowercase )
lowercase__ = model(**__lowercase )
self.assertTrue(outputs.loss is not None )
def A__ ( self : Optional[Any] ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__lowercase, **__lowercase, output_hidden_states=__lowercase )
def A__ ( self : Optional[int] ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(__lowercase ).to(__lowercase )
lowercase__ = model(**__lowercase, output_attentions=__lowercase )
self.assertTrue(outputs.attentions is not None )
def A__ ( self : Optional[int] ):
if not self.model_tester.is_training:
return
lowercase__ = self.all_model_classes[1]
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs()
lowercase__ = model_class(__lowercase )
model.to(__lowercase )
model.train()
lowercase__ = model(__lowercase, mask_labels=__lowercase, class_labels=__lowercase ).loss
loss.backward()
def A__ ( self : Dict ):
lowercase__ = self.all_model_classes[1]
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs()
lowercase__ = True
lowercase__ = True
lowercase__ = model_class(__lowercase ).to(__lowercase )
model.train()
lowercase__ = model(__lowercase, mask_labels=__lowercase, class_labels=__lowercase )
lowercase__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowercase__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowercase__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowercase__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowercase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowercase_ = 1e-4
def __lowerCAmelCase ( ):
lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class _snake_case ( unittest.TestCase):
@cached_property
def A__ ( self : Optional[int] ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def A__ ( self : List[Any] ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def A__ ( self : List[Any] ):
lowercase__ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__lowercase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(__lowercase, return_tensors="pt" ).to(__lowercase )
lowercase__ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowercase, (1, 3, 384, 384) )
with torch.no_grad():
lowercase__ = model(**__lowercase )
lowercase__ = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3], __lowercase, atol=__lowercase ) )
lowercase__ = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], __lowercase, atol=__lowercase ) )
lowercase__ = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3], __lowercase, atol=__lowercase ) )
def A__ ( self : List[str] ):
lowercase__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__lowercase ).eval()
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(__lowercase, return_tensors="pt" ).to(__lowercase )
lowercase__ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowercase, (1, 3, 384, 384) )
with torch.no_grad():
lowercase__ = model(**__lowercase )
# masks_queries_logits
lowercase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowercase__ = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
lowercase__ = torch.tensor(__lowercase ).to(__lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], __lowercase, atol=__lowercase ) )
# class_queries_logits
lowercase__ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1) )
lowercase__ = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], __lowercase, atol=__lowercase ) )
def A__ ( self : Optional[Any] ):
lowercase__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__lowercase ).eval()
lowercase__ = self.default_image_processor
lowercase__ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )], segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )], return_tensors="pt", )
lowercase__ = inputs["pixel_values"].to(__lowercase )
lowercase__ = [el.to(__lowercase ) for el in inputs["mask_labels"]]
lowercase__ = [el.to(__lowercase ) for el in inputs["class_labels"]]
with torch.no_grad():
lowercase__ = model(**__lowercase )
self.assertTrue(outputs.loss is not None )
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowerCAmelCase ( ):
lowercase__ = HfArgumentParser(SCREAMING_SNAKE_CASE_ )
lowercase__ = parser.parse_args_into_dataclasses()[0]
lowercase__ = TensorFlowBenchmark(args=SCREAMING_SNAKE_CASE_ )
try:
lowercase__ = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase__ = "Arg --no_{0} is no longer used, please use --no-{0} instead."
lowercase__ = " ".join(str(SCREAMING_SNAKE_CASE_ ).split(" " )[:-1] )
lowercase__ = ""
lowercase__ = eval(str(SCREAMING_SNAKE_CASE_ ).split(" " )[-1] )
lowercase__ = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
lowercase__ = full_error_msg + begin_error_msg + str(SCREAMING_SNAKE_CASE_ )
raise ValueError(SCREAMING_SNAKE_CASE_ )
benchmark.run()
if __name__ == "__main__":
main()
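# Example invocation (flags are illustrative; see TensorFlowBenchmarkArguments
# for the full argument list):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128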
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowercase_ = """<<<<<<< This should probably be modified because it mentions: """
lowercase_ = """=======
>>>>>>>
"""
lowercase_ = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
lowercase_ = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class _snake_case ( lowercase__):
@staticmethod
def A__ ( __lowercase : ArgumentParser ):
lowercase__ = parser.add_parser(
"convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", )
train_parser.add_argument(
"--tfds_path", type=__lowercase, required=__lowercase, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", )
train_parser.add_argument(
"--datasets_directory", type=__lowercase, required=__lowercase, help="Path to the HuggingFace Datasets folder." )
train_parser.set_defaults(func=__lowercase )
def __init__( self : Tuple, __lowercase : str, __lowercase : str, *__lowercase : Tuple ):
lowercase__ = get_logger("datasets-cli/converting" )
lowercase__ = tfds_path
lowercase__ = datasets_directory
def A__ ( self : Any ):
if os.path.isdir(self._tfds_path ):
lowercase__ = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
lowercase__ = os.path.dirname(self._tfds_path )
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
lowercase__ = os.path.abspath(self._datasets_directory )
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
lowercase__ = []
lowercase__ = []
lowercase__ = {}
if os.path.isdir(self._tfds_path ):
lowercase__ = os.listdir(__lowercase )
else:
lowercase__ = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'''Looking at file {f_name}''' )
lowercase__ = os.path.join(__lowercase, __lowercase )
lowercase__ = os.path.join(__lowercase, __lowercase )
if not os.path.isfile(__lowercase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file" )
continue
with open(__lowercase, encoding="utf-8" ) as f:
lowercase__ = f.readlines()
lowercase__ = []
lowercase__ = False
lowercase__ = False
lowercase__ = []
for line in lines:
lowercase__ = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowercase__ = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
lowercase__ = ""
continue
elif "from absl import logging" in out_line:
lowercase__ = "from datasets import logging\n"
elif "getLogger" in out_line:
lowercase__ = out_line.replace("getLogger", "get_logger" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
lowercase__ = True
lowercase__ = list(filter(lambda __lowercase : e in out_line, __lowercase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowercase ) + "\n" )
out_lines.append(__lowercase )
out_lines.append(__lowercase )
continue
else:
for pattern, replacement in TO_CONVERT:
lowercase__ = re.sub(__lowercase, __lowercase, __lowercase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowercase__ = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)", __lowercase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
lowercase__ = "from . import " + match.group(1 )
# Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowercase__ = True
out_lines.append(__lowercase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowercase__ = f_name.replace(".py", "" )
lowercase__ = os.path.join(__lowercase, __lowercase )
lowercase__ = os.path.join(__lowercase, __lowercase )
os.makedirs(__lowercase, exist_ok=__lowercase )
self._logger.info(F'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(__lowercase )
if needs_manual_update:
with_manual_update.append(__lowercase )
with open(__lowercase, "w", encoding="utf-8" ) as f:
f.writelines(__lowercase )
self._logger.info(F'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
lowercase__ = os.path.basename(__lowercase )
lowercase__ = imports_to_builder_map[f_name.replace(".py", "" )]
self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(__lowercase, __lowercase )
except KeyError:
self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 37
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowercase_ = """<<<<<<< This should probably be modified because it mentions: """
lowercase_ = """=======
>>>>>>>
"""
lowercase_ = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {utils_file} to {dest_folder}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
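# A minimal sketch (not part of the original command) of how the TO_CONVERT
# table rewrites a single TFDS line; the sample line is invented for
# illustration and the guard keeps the demo inert on import.
if __name__ == "__main__":
    sample = "features=tfds.features.FeaturesDict({'text': tfds.features.Text()})\n"
    for pattern, replacement in TO_CONVERT:
        sample = re.sub(pattern, replacement, sample)
    print(sample)  # features=datasets.Features({'text': datasets.Value('string')})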
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
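# A short usage sketch of the `_pad` override above (assumes the
# `allenai/led-base-16384` checkpoint can be downloaded):
#
#   tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tok("a short document")
#   # global attention on the first token; the list matches the unpadded length
#   enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
#   # pad() grows input_ids/attention_mask, and _pad() extends
#   # global_attention_mask with -1 ("local attention") to match
#   padded = tok.pad(enc, padding="max_length", max_length=16)
#   assert len(padded["global_attention_mask"]) == 16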
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
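    # Sanity check (assuming this is Project Euler problem 116: tiles of one
    # colour with lengths 2-4 and at least one tile per row). A length-5 row
    # admits 7 red, 3 green and 2 blue tilings, i.e. 12 in total.
    assert solution(5) == 12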
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
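# Typical shell usage of the entry point defined above (the subcommands are
# exactly the ones registered in main()):
#
#   accelerate config           # interactive configuration
#   accelerate env              # print environment info
#   accelerate launch train.py  # run a script with the configured setup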
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0

    for _ in range(steps):
        # Approximate each small segment of the curve as linear and solve
        # for the trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2

        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
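    # Cross-check against the exact value (a sanity check added here, not part
    # of the original script): because of the abs() in the accumulation, the
    # routine converges to the integral of |f|, which for f(x) = x^3 + x^2
    # over [-5, 5] is 938/3 (antiderivative x^4/4 + x^3/3, split at the sign
    # change x = -1).
    exact = 938 / 3
    approx = trapezoidal_area(f, -5, 5, 100_000)
    assert abs(approx - exact) < 1e-2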
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic


def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
f'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
f'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
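    # Illustration only (safe to run: a toy dict, no model weights): the
    # rename_key helper above just moves a value to a new key. The key pair
    # below is the first entry produced by create_rename_keys.
    toy_state_dict = {"backbone.0.body.conv1.weight": torch.zeros(1)}
    rename_key(
        toy_state_dict,
        "backbone.0.body.conv1.weight",
        "backbone.conv_encoder.model.embedder.embedder.convolution.weight",
    )
    assert "backbone.conv_encoder.model.embedder.embedder.convolution.weight" in toy_state_dict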
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
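# Quick sketch of what the print-statement regex above flags: comments and
# string literals that merely contain "print(" are consumed by the earlier
# alternatives, so only a real call ends up in capture group 1.
_regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
_flagged = [m for m in _regexp.finditer("print('hi')\n# print('ignored')\n") if m.group(1)]
assert len(_flagged) == 1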
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
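# With the lazy module installed in sys.modules, the public names import as
# usual. A minimal sketch (assumes torch is installed and the transformers
# version ships X-MOD):
#
#   from transformers import XmodConfig, XmodModel
#
#   config = XmodConfig()      # default X-MOD configuration
#   model = XmodModel(config)  # randomly initialised weights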
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class _snake_case ( unittest.TestCase):
def A__ ( self : Any ):
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=__lowercase )
lowercase__ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 4
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
lowercase__ = replicate(__lowercase )
lowercase__ = jax.random.split(__lowercase, __lowercase )
lowercase__ = shard(__lowercase )
lowercase__ = pipeline(__lowercase, __lowercase, __lowercase, __lowercase, jit=__lowercase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3
assert np.abs(np.abs(__lowercase, dtype=np.floataa ).sum() - 49947.875 ) < 5e-1
lowercase__ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__lowercase ) == num_samples
def A__ ( self : Union[str, Any] ):
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=__lowercase )
lowercase__ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
lowercase__ = replicate(__lowercase )
lowercase__ = jax.random.split(__lowercase, __lowercase )
lowercase__ = shard(__lowercase )
lowercase__ = pipeline(__lowercase, __lowercase, __lowercase, __lowercase, jit=__lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3
assert np.abs((np.abs(__lowercase, dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1
def A__ ( self : int ):
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, safety_checker=__lowercase )
lowercase__ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
lowercase__ = replicate(__lowercase )
lowercase__ = jax.random.split(__lowercase, __lowercase )
lowercase__ = shard(__lowercase )
lowercase__ = pipeline(__lowercase, __lowercase, __lowercase, __lowercase, jit=__lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(__lowercase, dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def A__ ( self : Dict ):
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa )
lowercase__ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
lowercase__ = replicate(__lowercase )
lowercase__ = jax.random.split(__lowercase, __lowercase )
lowercase__ = shard(__lowercase )
lowercase__ = pipeline(__lowercase, __lowercase, __lowercase, __lowercase, jit=__lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(__lowercase, dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def A__ ( self : Union[str, Any] ):
lowercase__ = FlaxDDIMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=__lowercase, steps_offset=1, )
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, scheduler=__lowercase, safety_checker=__lowercase, )
lowercase__ = scheduler.create_state()
lowercase__ = scheduler_state
lowercase__ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
lowercase__ = replicate(__lowercase )
lowercase__ = jax.random.split(__lowercase, __lowercase )
lowercase__ = shard(__lowercase )
lowercase__ = pipeline(__lowercase, __lowercase, __lowercase, __lowercase, jit=__lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3
assert np.abs((np.abs(__lowercase, dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        image_slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(image_slice_eff - image_slice).max() < 1e-2
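
# A minimal, hedged sketch (ours, not part of the test class above) of the pmap
# workflow these tests exercise: replicate the params across devices, shard the
# tokenized prompts, and call the pipeline with jit=True. The checkpoint and
# revision match the tests; the prompt and step count are illustrative assumptions.
if __name__ == "__main__":
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    from diffusers import FlaxStableDiffusionPipeline

    pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
    )
    num_samples = jax.device_count()
    prompt_ids = pipeline.prepare_inputs(num_samples * ["a photo of an astronaut riding a horse"])

    # one prompt shard and one RNG key per device
    images = pipeline(
        shard(prompt_ids),
        replicate(params),
        jax.random.split(jax.random.PRNGKey(0), num_samples),
        num_inference_steps=25,
        jit=True,
    ).images
    print(images.shape)  # (num_samples, 1, 512, 512, 3)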
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = (
            "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        )
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Each flattened patch carries its row and column index, hence the "+ 2"
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # In VQA mode the processor must raise when no header text is given
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Inputs are converted to RGB, so one of the four channels is dropped
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
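
# A minimal, hedged usage sketch of the processor exercised above. This test
# module is normally run through pytest; the snippet is guarded so it only runs
# standalone. The image URL mirrors prepare_dummy_image; max_patches=1024 is an
# illustrative assumption.
if __name__ == "__main__":
    import requests
    from PIL import Image

    from transformers import Pix2StructImageProcessor

    processor = Pix2StructImageProcessor()
    url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    inputs = processor(image, return_tensors="pt", max_patches=1024)
    # With the default 16x16 patches and 3 channels: (1, 1024, 16 * 16 * 3 + 2)
    print(inputs.flattened_patches.shape)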
from __future__ import annotations


def peak(lst: list[int]) -> int:
    """
    Return the peak value of ``lst``, which is assumed to be unimodal:
    strictly increasing and then strictly decreasing, with at least 3 elements.
    """
    m = len(lst) // 2

    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]

    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]

    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])

    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
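    # Hedged usage example (ours, not from the original module): the list rises
    # to 5 and then falls, so the peak is 5.
    print(peak([1, 2, 3, 4, 5, 4, 3, 2, 1]))  # 5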
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """
    Count the unique simple paths from (row, col) to the bottom-right cell of
    ``grid``, moving up, down, left, or right and never revisiting a cell.
    Cells equal to 1 are blocked.
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
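    # Hedged usage example (ours): a 3x3 grid with the center blocked has
    # exactly two simple paths from the top-left to the bottom-right corner.
    example_grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(depth_first_search(example_grid, 0, 0, set()))  # 2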